VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 93133

Last change on this file since 93133 was 93133, checked in by vboxsync, 3 years ago

VMM,{HMVMXR0.cpp,VMXTemplate.cpp.h}: Make use of the VMX template code in HM, getting rid of the temporary code duplication, bugref:10136 [build fix]

Line 
1/* $Id: VMXAllTemplate.cpp.h 93133 2022-01-06 13:09:49Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/**
42 * Flags to skip redundant reads of some common VMCS fields that are not part of
43 * the guest-CPU or VCPU state but are needed while handling VM-exits.
44 */
45#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
46#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
47#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
48#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
49#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
50#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
51#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
52#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
53#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
54#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
55
56/** All the VMCS fields required for processing of exception/NMI VM-exits. */
57#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
58 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
59 | HMVMX_READ_EXIT_INSTR_LEN \
60 | HMVMX_READ_IDT_VECTORING_INFO \
61 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
62
63/** Assert that all the given fields have been read from the VMCS. */
64#ifdef VBOX_STRICT
65# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
66 do { \
67 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
68 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
69 } while (0)
70#else
71# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
72#endif
73
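/*
 * Illustrative sketch (not compiled; hypothetical handler name): roughly how the
 * HMVMX_READ_XXX flags and HMVMX_ASSERT_READ are meant to be used by a VM-exit
 * handler.  The read helpers (vmxHCReadExitQualVmcs, vmxHCReadExitInstrLenVmcs)
 * are defined further down in this file and only perform the VMCS read when the
 * corresponding HMVMX_READ_XXX bit is still clear in fVmcsFieldsRead.
 */
#if 0
static VBOXSTRICTRC vmxHCExitExampleSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Read only the fields this exit needs; repeated calls are cheap no-ops. */
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);

    /* In strict builds, verify the fields were read before they are consumed. */
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);

    Log4(("Example: uExitQual=%#RX64 cbExitInstr=%RU32\n", pVmxTransient->uExitQual, pVmxTransient->cbExitInstr));
    return VINF_SUCCESS;
}
#endif
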
74/**
75 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
76 * guest using hardware-assisted VMX.
77 *
78 * This excludes state like GPRs (other than RSP), which are always swapped
79 * and restored across the world-switch, and also MSRs such as EFER which cannot
80 * be modified by the guest without causing a VM-exit.
81 */
82#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
83 | CPUMCTX_EXTRN_RFLAGS \
84 | CPUMCTX_EXTRN_RSP \
85 | CPUMCTX_EXTRN_SREG_MASK \
86 | CPUMCTX_EXTRN_TABLE_MASK \
87 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
88 | CPUMCTX_EXTRN_SYSCALL_MSRS \
89 | CPUMCTX_EXTRN_SYSENTER_MSRS \
90 | CPUMCTX_EXTRN_TSC_AUX \
91 | CPUMCTX_EXTRN_OTHER_MSRS \
92 | CPUMCTX_EXTRN_CR0 \
93 | CPUMCTX_EXTRN_CR3 \
94 | CPUMCTX_EXTRN_CR4 \
95 | CPUMCTX_EXTRN_DR7 \
96 | CPUMCTX_EXTRN_HWVIRT \
97 | CPUMCTX_EXTRN_INHIBIT_INT \
98 | CPUMCTX_EXTRN_INHIBIT_NMI)
99
100/**
101 * Exception bitmap mask for real-mode guests (real-on-v86).
102 *
103 * We need to intercept all exceptions manually except:
104 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
105 * due to bugs in Intel CPUs.
106 * - \#PF need not be intercepted even in real-mode if we have nested paging
107 * support.
108 */
109#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
110 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
111 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
112 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
113 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
114 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
115 | RT_BIT(X86_XCPT_XF))
116
117/** Maximum VM-instruction error number. */
118#define HMVMX_INSTR_ERROR_MAX 28
119
120/** Profiling macro. */
121#ifdef HM_PROFILE_EXIT_DISPATCH
122# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
123# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
124#else
125# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
126# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
127#endif
128
129#ifndef IN_NEM_DARWIN
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
140#else
141# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
142# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
143#endif
144
145/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
146 * context. */
147#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
148 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
149 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
150
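/*
 * Illustrative sketch (hypothetical usage; uGuestCr0 is just an example local): a
 * clear bit in pVCpu->cpum.GstCtx.fExtrn means that piece of state has already been
 * imported into the guest-CPU context, so HMVMX_CPUMCTX_ASSERT() guards code that
 * reads such state directly, e.g.:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
 */
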
151/** Log the VM-exit reason with an easily visible marker to identify it in a
152 * potential sea of logging data. */
153#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
154 do { \
155 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
156 HMGetVmxExitName(a_uExitReason))); \
157 } while (0) \
158
159
160/*********************************************************************************************************************************
161* Structures and Typedefs *
162*********************************************************************************************************************************/
163/**
164 * Memory operand read or write access.
165 */
166typedef enum VMXMEMACCESS
167{
168 VMXMEMACCESS_READ = 0,
169 VMXMEMACCESS_WRITE = 1
170} VMXMEMACCESS;
171
172
173/**
174 * VMX VM-exit handler.
175 *
176 * @returns Strict VBox status code (i.e. informational status codes too).
177 * @param pVCpu The cross context virtual CPU structure.
178 * @param pVmxTransient The VMX-transient structure.
179 */
180#ifndef HMVMX_USE_FUNCTION_TABLE
181typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
182#else
183typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
184/** Pointer to VM-exit handler. */
185typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
186#endif
187
188/**
189 * VMX VM-exit handler, non-strict status code.
190 *
191 * This is generally the same as FNVMXEXITHANDLER; the NSRC suffix is just FYI.
192 *
193 * @returns VBox status code, no informational status code returned.
194 * @param pVCpu The cross context virtual CPU structure.
195 * @param pVmxTransient The VMX-transient structure.
196 *
197 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
198 * use of that status code will be replaced with VINF_EM_SOMETHING
199 * later when switching over to IEM.
200 */
201#ifndef HMVMX_USE_FUNCTION_TABLE
202typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203#else
204typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
205#endif
206
207
208/*********************************************************************************************************************************
209* Internal Functions *
210*********************************************************************************************************************************/
211#ifndef HMVMX_USE_FUNCTION_TABLE
212DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
213# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
214# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
215#else
216# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
217# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
218#endif
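
/*
 * Illustrative sketch (hypothetical handler name): the HMVMX_EXIT_DECL and
 * HMVMX_EXIT_NSRC_DECL macros above are what the VM-exit handler definitions use,
 * so one definition works both with and without HMVMX_USE_FUNCTION_TABLE, e.g.:
 *
 *     HMVMX_EXIT_DECL vmxHCExitExample(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         RT_NOREF(pVCpu, pVmxTransient);
 *         return VINF_SUCCESS;
 *     }
 */
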
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
221#endif
222
223static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
224
225/** @name VM-exit handler prototypes.
226 * @{
227 */
228static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
229static FNVMXEXITHANDLER vmxHCExitExtInt;
230static FNVMXEXITHANDLER vmxHCExitTripleFault;
231static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
232static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
233static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
234static FNVMXEXITHANDLER vmxHCExitCpuid;
235static FNVMXEXITHANDLER vmxHCExitGetsec;
236static FNVMXEXITHANDLER vmxHCExitHlt;
237static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
238static FNVMXEXITHANDLER vmxHCExitInvlpg;
239static FNVMXEXITHANDLER vmxHCExitRdpmc;
240static FNVMXEXITHANDLER vmxHCExitVmcall;
241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
242static FNVMXEXITHANDLER vmxHCExitVmclear;
243static FNVMXEXITHANDLER vmxHCExitVmlaunch;
244static FNVMXEXITHANDLER vmxHCExitVmptrld;
245static FNVMXEXITHANDLER vmxHCExitVmptrst;
246static FNVMXEXITHANDLER vmxHCExitVmread;
247static FNVMXEXITHANDLER vmxHCExitVmresume;
248static FNVMXEXITHANDLER vmxHCExitVmwrite;
249static FNVMXEXITHANDLER vmxHCExitVmxoff;
250static FNVMXEXITHANDLER vmxHCExitVmxon;
251static FNVMXEXITHANDLER vmxHCExitInvvpid;
252#endif
253static FNVMXEXITHANDLER vmxHCExitRdtsc;
254static FNVMXEXITHANDLER vmxHCExitMovCRx;
255static FNVMXEXITHANDLER vmxHCExitMovDRx;
256static FNVMXEXITHANDLER vmxHCExitIoInstr;
257static FNVMXEXITHANDLER vmxHCExitRdmsr;
258static FNVMXEXITHANDLER vmxHCExitWrmsr;
259static FNVMXEXITHANDLER vmxHCExitMwait;
260static FNVMXEXITHANDLER vmxHCExitMtf;
261static FNVMXEXITHANDLER vmxHCExitMonitor;
262static FNVMXEXITHANDLER vmxHCExitPause;
263static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
264static FNVMXEXITHANDLER vmxHCExitApicAccess;
265static FNVMXEXITHANDLER vmxHCExitEptViolation;
266static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
267static FNVMXEXITHANDLER vmxHCExitRdtscp;
268static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
269static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
270static FNVMXEXITHANDLER vmxHCExitXsetbv;
271static FNVMXEXITHANDLER vmxHCExitInvpcid;
272static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
273static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
274static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
275/** @} */
276
277#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
278/** @name Nested-guest VM-exit handler prototypes.
279 * @{
280 */
281static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
282static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
283static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
284static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
285static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
286static FNVMXEXITHANDLER vmxHCExitHltNested;
287static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
288static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
289static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
290static FNVMXEXITHANDLER vmxHCExitRdtscNested;
291static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
292static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
293static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
294static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
295static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
296static FNVMXEXITHANDLER vmxHCExitMwaitNested;
297static FNVMXEXITHANDLER vmxHCExitMtfNested;
298static FNVMXEXITHANDLER vmxHCExitMonitorNested;
299static FNVMXEXITHANDLER vmxHCExitPauseNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
301static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
302static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
303static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
304static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
305static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
306static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
307static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
308static FNVMXEXITHANDLER vmxHCExitInstrNested;
309static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
310/** @} */
311#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
312
313
314/*********************************************************************************************************************************
315* Global Variables *
316*********************************************************************************************************************************/
317#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
318/**
319 * Array of all VMCS fields.
320 * Any fields added to the VT-x spec. should be added here.
321 *
322 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
323 * of nested-guests.
324 */
325static const uint32_t g_aVmcsFields[] =
326{
327 /* 16-bit control fields. */
328 VMX_VMCS16_VPID,
329 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
330 VMX_VMCS16_EPTP_INDEX,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410
411 /* 64-bit read-only data fields. */
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
413 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
414
415 /* 64-bit guest-state fields. */
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
417 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
418 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
419 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
420 VMX_VMCS64_GUEST_PAT_FULL,
421 VMX_VMCS64_GUEST_PAT_HIGH,
422 VMX_VMCS64_GUEST_EFER_FULL,
423 VMX_VMCS64_GUEST_EFER_HIGH,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
425 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
426 VMX_VMCS64_GUEST_PDPTE0_FULL,
427 VMX_VMCS64_GUEST_PDPTE0_HIGH,
428 VMX_VMCS64_GUEST_PDPTE1_FULL,
429 VMX_VMCS64_GUEST_PDPTE1_HIGH,
430 VMX_VMCS64_GUEST_PDPTE2_FULL,
431 VMX_VMCS64_GUEST_PDPTE2_HIGH,
432 VMX_VMCS64_GUEST_PDPTE3_FULL,
433 VMX_VMCS64_GUEST_PDPTE3_HIGH,
434 VMX_VMCS64_GUEST_BNDCFGS_FULL,
435 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
436 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
437 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
438 VMX_VMCS64_GUEST_PKRS_FULL,
439 VMX_VMCS64_GUEST_PKRS_HIGH,
440
441 /* 64-bit host-state fields. */
442 VMX_VMCS64_HOST_PAT_FULL,
443 VMX_VMCS64_HOST_PAT_HIGH,
444 VMX_VMCS64_HOST_EFER_FULL,
445 VMX_VMCS64_HOST_EFER_HIGH,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
447 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
448 VMX_VMCS64_HOST_PKRS_FULL,
449 VMX_VMCS64_HOST_PKRS_HIGH,
450
451 /* 32-bit control fields. */
452 VMX_VMCS32_CTRL_PIN_EXEC,
453 VMX_VMCS32_CTRL_PROC_EXEC,
454 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
456 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
457 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
458 VMX_VMCS32_CTRL_EXIT,
459 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
460 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
461 VMX_VMCS32_CTRL_ENTRY,
462 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
463 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
464 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
465 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
466 VMX_VMCS32_CTRL_TPR_THRESHOLD,
467 VMX_VMCS32_CTRL_PROC_EXEC2,
468 VMX_VMCS32_CTRL_PLE_GAP,
469 VMX_VMCS32_CTRL_PLE_WINDOW,
470
471 /* 32-bit read-only fields. */
472 VMX_VMCS32_RO_VM_INSTR_ERROR,
473 VMX_VMCS32_RO_EXIT_REASON,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
475 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
476 VMX_VMCS32_RO_IDT_VECTORING_INFO,
477 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
478 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
479 VMX_VMCS32_RO_EXIT_INSTR_INFO,
480
481 /* 32-bit guest-state fields. */
482 VMX_VMCS32_GUEST_ES_LIMIT,
483 VMX_VMCS32_GUEST_CS_LIMIT,
484 VMX_VMCS32_GUEST_SS_LIMIT,
485 VMX_VMCS32_GUEST_DS_LIMIT,
486 VMX_VMCS32_GUEST_FS_LIMIT,
487 VMX_VMCS32_GUEST_GS_LIMIT,
488 VMX_VMCS32_GUEST_LDTR_LIMIT,
489 VMX_VMCS32_GUEST_TR_LIMIT,
490 VMX_VMCS32_GUEST_GDTR_LIMIT,
491 VMX_VMCS32_GUEST_IDTR_LIMIT,
492 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_INT_STATE,
501 VMX_VMCS32_GUEST_ACTIVITY_STATE,
502 VMX_VMCS32_GUEST_SMBASE,
503 VMX_VMCS32_GUEST_SYSENTER_CS,
504 VMX_VMCS32_PREEMPT_TIMER_VALUE,
505
506 /* 32-bit host-state fields. */
507 VMX_VMCS32_HOST_SYSENTER_CS,
508
509 /* Natural-width control fields. */
510 VMX_VMCS_CTRL_CR0_MASK,
511 VMX_VMCS_CTRL_CR4_MASK,
512 VMX_VMCS_CTRL_CR0_READ_SHADOW,
513 VMX_VMCS_CTRL_CR4_READ_SHADOW,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
517 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
518
519 /* Natural-width read-only data fields. */
520 VMX_VMCS_RO_EXIT_QUALIFICATION,
521 VMX_VMCS_RO_IO_RCX,
522 VMX_VMCS_RO_IO_RSI,
523 VMX_VMCS_RO_IO_RDI,
524 VMX_VMCS_RO_IO_RIP,
525 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
526
527 /* Natural-width guest-state field */
528 VMX_VMCS_GUEST_CR0,
529 VMX_VMCS_GUEST_CR3,
530 VMX_VMCS_GUEST_CR4,
531 VMX_VMCS_GUEST_ES_BASE,
532 VMX_VMCS_GUEST_CS_BASE,
533 VMX_VMCS_GUEST_SS_BASE,
534 VMX_VMCS_GUEST_DS_BASE,
535 VMX_VMCS_GUEST_FS_BASE,
536 VMX_VMCS_GUEST_GS_BASE,
537 VMX_VMCS_GUEST_LDTR_BASE,
538 VMX_VMCS_GUEST_TR_BASE,
539 VMX_VMCS_GUEST_GDTR_BASE,
540 VMX_VMCS_GUEST_IDTR_BASE,
541 VMX_VMCS_GUEST_DR7,
542 VMX_VMCS_GUEST_RSP,
543 VMX_VMCS_GUEST_RIP,
544 VMX_VMCS_GUEST_RFLAGS,
545 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
546 VMX_VMCS_GUEST_SYSENTER_ESP,
547 VMX_VMCS_GUEST_SYSENTER_EIP,
548 VMX_VMCS_GUEST_S_CET,
549 VMX_VMCS_GUEST_SSP,
550 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
551
552 /* Natural-width host-state fields */
553 VMX_VMCS_HOST_CR0,
554 VMX_VMCS_HOST_CR3,
555 VMX_VMCS_HOST_CR4,
556 VMX_VMCS_HOST_FS_BASE,
557 VMX_VMCS_HOST_GS_BASE,
558 VMX_VMCS_HOST_TR_BASE,
559 VMX_VMCS_HOST_GDTR_BASE,
560 VMX_VMCS_HOST_IDTR_BASE,
561 VMX_VMCS_HOST_SYSENTER_ESP,
562 VMX_VMCS_HOST_SYSENTER_EIP,
563 VMX_VMCS_HOST_RSP,
564 VMX_VMCS_HOST_RIP,
565 VMX_VMCS_HOST_S_CET,
566 VMX_VMCS_HOST_SSP,
567 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
568};
569#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
570
571#ifdef VBOX_STRICT
572static const uint32_t g_aVmcsSegBase[] =
573{
574 VMX_VMCS_GUEST_ES_BASE,
575 VMX_VMCS_GUEST_CS_BASE,
576 VMX_VMCS_GUEST_SS_BASE,
577 VMX_VMCS_GUEST_DS_BASE,
578 VMX_VMCS_GUEST_FS_BASE,
579 VMX_VMCS_GUEST_GS_BASE
580};
581static const uint32_t g_aVmcsSegSel[] =
582{
583 VMX_VMCS16_GUEST_ES_SEL,
584 VMX_VMCS16_GUEST_CS_SEL,
585 VMX_VMCS16_GUEST_SS_SEL,
586 VMX_VMCS16_GUEST_DS_SEL,
587 VMX_VMCS16_GUEST_FS_SEL,
588 VMX_VMCS16_GUEST_GS_SEL
589};
590static const uint32_t g_aVmcsSegLimit[] =
591{
592 VMX_VMCS32_GUEST_ES_LIMIT,
593 VMX_VMCS32_GUEST_CS_LIMIT,
594 VMX_VMCS32_GUEST_SS_LIMIT,
595 VMX_VMCS32_GUEST_DS_LIMIT,
596 VMX_VMCS32_GUEST_FS_LIMIT,
597 VMX_VMCS32_GUEST_GS_LIMIT
598};
599static const uint32_t g_aVmcsSegAttr[] =
600{
601 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
604 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
605 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
606 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
607};
608AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
609AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
610AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
611AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
612#endif /* VBOX_STRICT */
613
614#ifdef HMVMX_USE_FUNCTION_TABLE
615/**
616 * VMX_EXIT dispatch table.
617 */
618static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
619{
620 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
621 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
622 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
623 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
624 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
625 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
626 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
627 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
628 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
629 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
630 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
631 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
632 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
633 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
634 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
635 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
636 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
637 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
638 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
640 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
641 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
642 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
643 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
644 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
645 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
646 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
647 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
648 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
649#else
650 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
651 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
652 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
653 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
654 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
655 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
656 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
657 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
658 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
659#endif
660 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
661 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
662 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
663 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
664 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
665 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
666 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
667 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
668 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
669 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
670 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
671 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
672 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
673 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
674 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
675 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
676 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
677 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
678 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
679 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
680 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
681 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
683 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
684 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
685#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
686 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
687#else
688 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
689#endif
690 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
691 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
692 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
693 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
694 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
695 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
696 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
697 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
698 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
699 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
700 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
701 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
702 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
703 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
704 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
705 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
706};
707#endif /* HMVMX_USE_FUNCTION_TABLE */
708
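/*
 * Illustrative sketch (hypothetical; uExitReason being the basic exit reason): with
 * HMVMX_USE_FUNCTION_TABLE defined, a VM-exit is handled by indexing the table
 * above, roughly:
 *
 *     Assert(uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 */
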
709#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
710static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
711{
712 /* 0 */ "(Not Used)",
713 /* 1 */ "VMCALL executed in VMX root operation.",
714 /* 2 */ "VMCLEAR with invalid physical address.",
715 /* 3 */ "VMCLEAR with VMXON pointer.",
716 /* 4 */ "VMLAUNCH with non-clear VMCS.",
717 /* 5 */ "VMRESUME with non-launched VMCS.",
718 /* 6 */ "VMRESUME after VMXOFF",
719 /* 7 */ "VM-entry with invalid control fields.",
720 /* 8 */ "VM-entry with invalid host state fields.",
721 /* 9 */ "VMPTRLD with invalid physical address.",
722 /* 10 */ "VMPTRLD with VMXON pointer.",
723 /* 11 */ "VMPTRLD with incorrect revision identifier.",
724 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
725 /* 13 */ "VMWRITE to read-only VMCS component.",
726 /* 14 */ "(Not Used)",
727 /* 15 */ "VMXON executed in VMX root operation.",
728 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
729 /* 17 */ "VM-entry with non-launched executing VMCS.",
730 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
731 /* 19 */ "VMCALL with non-clear VMCS.",
732 /* 20 */ "VMCALL with invalid VM-exit control fields.",
733 /* 21 */ "(Not Used)",
734 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
735 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
736 /* 24 */ "VMCALL with invalid SMM-monitor features.",
737 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
738 /* 26 */ "VM-entry with events blocked by MOV SS.",
739 /* 27 */ "(Not Used)",
740 /* 28 */ "Invalid operand to INVEPT/INVVPID."
741};
742#endif /* VBOX_STRICT && LOG_ENABLED */
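
/*
 * Illustrative sketch (hypothetical locals): the table above is indexed by the
 * VM-instruction error number (VMX_VMCS32_RO_VM_INSTR_ERROR), clamped to
 * HMVMX_INSTR_ERROR_MAX, when logging a failed VMX instruction:
 *
 *     uint32_t const idxVmxError = RT_MIN(uInstrError, HMVMX_INSTR_ERROR_MAX);
 *     Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxVmxError]));
 */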
743
744
745/**
746 * Gets the CR0 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR0 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET,
759 * NW) and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
760 *
761 * Furthermore, modifications to any bits that are reserved/unspecified currently
762 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
763 * when future CPUs specify and use currently reserved/unspecified bits.
764 */
765 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
766 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
767 * and @bugref{6944}. */
768 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
769 return ( X86_CR0_PE
770 | X86_CR0_NE
771 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
772 | X86_CR0_PG
773 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
774}
775
776
777/**
778 * Gets the CR4 guest/host mask.
779 *
780 * These bits typically do not change through the lifetime of a VM. Any bit set in
781 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
782 * by the guest.
783 *
784 * @returns The CR4 guest/host mask.
785 * @param pVCpu The cross context virtual CPU structure.
786 */
787static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
788{
789 /*
790 * We construct a mask of all CR4 bits that the guest can modify without causing
791 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
792 * a VM-exit when the guest attempts to modify them when executing using
793 * hardware-assisted VMX.
794 *
795 * When a feature is not exposed to the guest (and may be present on the host),
796 * we want to intercept guest modifications to the bit so we can emulate proper
797 * behavior (e.g., #GP).
798 *
799 * Furthermore, only modifications to those bits that don't require immediate
800 * emulation are allowed. For example, PCIDE is excluded because the behavior
801 * depends on CR3 which might not always be the guest value while executing
802 * using hardware-assisted VMX.
803 */
804 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
805 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
806 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
807 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
808
809 /*
810 * Paranoia.
811 * Ensure features exposed to the guest are present on the host.
812 */
813 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
814 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
815 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
816
817 uint64_t const fGstMask = ( X86_CR4_PVI
818 | X86_CR4_TSD
819 | X86_CR4_DE
820 | X86_CR4_MCE
821 | X86_CR4_PCE
822 | X86_CR4_OSXMMEEXCPT
823 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
824 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
825 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
826 return ~fGstMask;
827}
828
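/*
 * Illustrative sketch (assumes a natural-width VMX_VMCS_WRITE_NW wrapper analogous
 * to the VMX_VMCS_READ_NW used elsewhere in this file): the fixed CR0/CR4 masks are
 * intended for the CR0/CR4 guest/host mask VMCS fields, so that guest writes to any
 * bit set in the mask cause a MOV CRx VM-exit:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     rc    |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
 *     AssertRC(rc);
 */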
829
830/**
831 * Adds one or more exceptions to the exception bitmap and commits it to the current
832 * VMCS.
833 *
834 * @param pVCpu The cross context virtual CPU structure.
835 * @param pVmxTransient The VMX-transient structure.
836 * @param uXcptMask The exception(s) to add.
837 */
838static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
839{
840 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
841 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
842 if ((uXcptBitmap & uXcptMask) != uXcptMask)
843 {
844 uXcptBitmap |= uXcptMask;
845 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
846 AssertRC(rc);
847 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
848 }
849}
850
851
852/**
853 * Adds an exception to the exception bitmap and commits it to the current VMCS.
854 *
855 * @param pVCpu The cross context virtual CPU structure.
856 * @param pVmxTransient The VMX-transient structure.
857 * @param uXcpt The exception to add.
858 */
859static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
860{
861 Assert(uXcpt <= X86_XCPT_LAST);
862 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
863}
864
865
866/**
867 * Removes one or more exceptions from the exception bitmap and commits it to the
868 * current VMCS.
869 *
870 * This takes care of not removing the exception intercept if a nested-guest
871 * requires the exception to be intercepted.
872 *
873 * @returns VBox status code.
874 * @param pVCpu The cross context virtual CPU structure.
875 * @param pVmxTransient The VMX-transient structure.
876 * @param uXcptMask The exception(s) to remove.
877 */
878static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
879{
880 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
881 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
882 if (u32XcptBitmap & uXcptMask)
883 {
884#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
885 if (!pVmxTransient->fIsNestedGuest)
886 { /* likely */ }
887 else
888 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
889#endif
890#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
891 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
892 | RT_BIT(X86_XCPT_DE)
893 | RT_BIT(X86_XCPT_NM)
894 | RT_BIT(X86_XCPT_TS)
895 | RT_BIT(X86_XCPT_UD)
896 | RT_BIT(X86_XCPT_NP)
897 | RT_BIT(X86_XCPT_SS)
898 | RT_BIT(X86_XCPT_GP)
899 | RT_BIT(X86_XCPT_PF)
900 | RT_BIT(X86_XCPT_MF));
901#elif defined(HMVMX_ALWAYS_TRAP_PF)
902 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
903#endif
904 if (uXcptMask)
905 {
906 /* Validate we are not removing any essential exception intercepts. */
907#ifndef IN_NEM_DARWIN
908 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
909#else
910 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
911#endif
912 NOREF(pVCpu);
913 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
914 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
915
916 /* Remove it from the exception bitmap. */
917 u32XcptBitmap &= ~uXcptMask;
918
919 /* Commit and update the cache if necessary. */
920 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
921 {
922 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
923 AssertRC(rc);
924 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
925 }
926 }
927 }
928 return VINF_SUCCESS;
929}
930
931
932/**
933 * Removes an exception from the exception bitmap and commits it to the current
934 * VMCS.
935 *
936 * @returns VBox status code.
937 * @param pVCpu The cross context virtual CPU structure.
938 * @param pVmxTransient The VMX-transient structure.
939 * @param uXcpt The exception to remove.
940 */
941static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
942{
943 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
944}
945
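/*
 * Illustrative sketch (hypothetical usage): the add/remove pair above is typically
 * used to toggle an intercept around a state change, for instance trapping #GP while
 * some condition needs emulating and dropping the intercept again afterwards:
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     // ... run the guest with #GP intercepted ...
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 */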
946
947#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
948/**
949 * Loads the shadow VMCS specified by the VMCS info. object.
950 *
951 * @returns VBox status code.
952 * @param pVmcsInfo The VMCS info. object.
953 *
954 * @remarks Can be called with interrupts disabled.
955 */
956static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
957{
958 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
959 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
960
961 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
962 if (RT_SUCCESS(rc))
963 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
964 return rc;
965}
966
967
968/**
969 * Clears the shadow VMCS specified by the VMCS info. object.
970 *
971 * @returns VBox status code.
972 * @param pVmcsInfo The VMCS info. object.
973 *
974 * @remarks Can be called with interrupts disabled.
975 */
976static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
977{
978 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
979 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
980
981 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
982 if (RT_SUCCESS(rc))
983 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
984 return rc;
985}
986
987
988/**
989 * Switches from and to the specified VMCSes.
990 *
991 * @returns VBox status code.
992 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
993 * @param pVmcsInfoTo The VMCS info. object we are switching to.
994 *
995 * @remarks Called with interrupts disabled.
996 */
997static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
998{
999 /*
1000 * Clear the VMCS we are switching out if it has not already been cleared.
1001 * This will sync any CPU internal data back to the VMCS.
1002 */
1003 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1004 {
1005 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1006 if (RT_SUCCESS(rc))
1007 {
1008 /*
1009 * The shadow VMCS, if any, would not be active at this point since we
1010 * would have cleared it while importing the virtual hardware-virtualization
1011 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1012 * clear the shadow VMCS here, just assert for safety.
1013 */
1014 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1015 }
1016 else
1017 return rc;
1018 }
1019
1020 /*
1021 * Clear the VMCS we are switching to if it has not already been cleared.
1022 * This will initialize the VMCS launch state to "clear" required for loading it.
1023 *
1024 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1025 */
1026 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1027 {
1028 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1029 if (RT_SUCCESS(rc))
1030 { /* likely */ }
1031 else
1032 return rc;
1033 }
1034
1035 /*
1036 * Finally, load the VMCS we are switching to.
1037 */
1038 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1039}
1040
1041
1042/**
1043 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1044 * caller.
1045 *
1046 * @returns VBox status code.
1047 * @param pVCpu The cross context virtual CPU structure.
1048 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1049 * true) or guest VMCS (pass false).
1050 */
1051static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1052{
1053 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1054 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1055
1056 PVMXVMCSINFO pVmcsInfoFrom;
1057 PVMXVMCSINFO pVmcsInfoTo;
1058 if (fSwitchToNstGstVmcs)
1059 {
1060 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1061 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1062 }
1063 else
1064 {
1065 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1066 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1067 }
1068
1069 /*
1070 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1071 * preemption hook code path acquires the current VMCS.
1072 */
1073 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1074
1075 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1076 if (RT_SUCCESS(rc))
1077 {
1078 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1079 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1080
1081 /*
1082 * If we are switching to a VMCS that was executed on a different host CPU or was
1083 * never executed before, flag that we need to export the host state before executing
1084 * guest/nested-guest code using hardware-assisted VMX.
1085 *
1086 * This could probably be done in a preemptible context since the preemption hook
1087 * will flag the necessary change in host context. However, since preemption is
1088 * already disabled and to avoid making assumptions about host specific code in
1089 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1090 * disabled.
1091 */
1092 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1093 { /* likely */ }
1094 else
1095 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1096
1097 ASMSetFlags(fEFlags);
1098
1099 /*
1100 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1101 * flag that we need to update the host MSR values there. Even if we decide in the
1102 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1103 * if its content differs, we would have to update the host MSRs anyway.
1104 */
1105 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1106 }
1107 else
1108 ASMSetFlags(fEFlags);
1109 return rc;
1110}
1111#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1112
1113
1114#ifdef VBOX_STRICT
1115/**
1116 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1117 * transient structure.
1118 *
1119 * @param pVCpu The cross context virtual CPU structure.
1120 * @param pVmxTransient The VMX-transient structure.
1121 */
1122DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1123{
1124 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1125 AssertRC(rc);
1126}
1127
1128
1129/**
1130 * Reads the VM-entry exception error code field from the VMCS into
1131 * the VMX transient structure.
1132 *
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1137{
1138 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1139 AssertRC(rc);
1140}
1141
1142
1143/**
1144 * Reads the VM-entry instruction length field from the VMCS into
1145 * the VMX transient structure.
1146 *
1147 * @param pVCpu The cross context virtual CPU structure.
1148 * @param pVmxTransient The VMX-transient structure.
1149 */
1150DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1151{
1152 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1153 AssertRC(rc);
1154}
1155#endif /* VBOX_STRICT */
1156
1157
1158/**
1159 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1160 * transient structure.
1161 *
1162 * @param pVCpu The cross context virtual CPU structure.
1163 * @param pVmxTransient The VMX-transient structure.
1164 */
1165DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1166{
1167 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1168 {
1169 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1170 AssertRC(rc);
1171 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1172 }
1173}
1174
1175
1176/**
1177 * Reads the VM-exit interruption error code from the VMCS into the VMX
1178 * transient structure.
1179 *
1180 * @param pVCpu The cross context virtual CPU structure.
1181 * @param pVmxTransient The VMX-transient structure.
1182 */
1183DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1184{
1185 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1186 {
1187 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1188 AssertRC(rc);
1189 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1190 }
1191}
1192
1193
1194/**
1195 * Reads the VM-exit instruction length field from the VMCS into the VMX
1196 * transient structure.
1197 *
1198 * @param pVCpu The cross context virtual CPU structure.
1199 * @param pVmxTransient The VMX-transient structure.
1200 */
1201DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1202{
1203 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1204 {
1205 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1206 AssertRC(rc);
1207 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1208 }
1209}
1210
1211
1212/**
1213 * Reads the VM-exit instruction-information field from the VMCS into
1214 * the VMX transient structure.
1215 *
1216 * @param pVCpu The cross context virtual CPU structure.
1217 * @param pVmxTransient The VMX-transient structure.
1218 */
1219DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1220{
1221 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1222 {
1223 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1224 AssertRC(rc);
1225 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1226 }
1227}
1228
1229
1230/**
1231 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1232 *
1233 * @param pVCpu The cross context virtual CPU structure.
1234 * @param pVmxTransient The VMX-transient structure.
1235 */
1236DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1237{
1238 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1239 {
1240 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1241 AssertRC(rc);
1242 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1243 }
1244}
1245
1246
1247/**
1248 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1249 *
1250 * @param pVCpu The cross context virtual CPU structure.
1251 * @param pVmxTransient The VMX-transient structure.
1252 */
1253DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1254{
1255 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1256 {
1257 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1258 AssertRC(rc);
1259 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1260 }
1261}
1262
1263
1264/**
1265 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1266 *
1267 * @param pVCpu The cross context virtual CPU structure.
1268 * @param pVmxTransient The VMX-transient structure.
1269 */
1270DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1271{
1272 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1273 {
1274 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1275 AssertRC(rc);
1276 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1277 }
1278}
1279
1280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1281/**
1282 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1283 * structure.
1284 *
1285 * @param pVCpu The cross context virtual CPU structure.
1286 * @param pVmxTransient The VMX-transient structure.
1287 */
1288DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1289{
1290 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1291 {
1292 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1293 AssertRC(rc);
1294 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1295 }
1296}
1297#endif
1298
1299/**
1300 * Reads the IDT-vectoring information field from the VMCS into the VMX
1301 * transient structure.
1302 *
1303 * @param pVCpu The cross context virtual CPU structure.
1304 * @param pVmxTransient The VMX-transient structure.
1305 *
1306 * @remarks No-long-jump zone!!!
1307 */
1308DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1309{
1310 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1311 {
1312 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1313 AssertRC(rc);
1314 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1315 }
1316}
1317
1318
1319/**
1320 * Reads the IDT-vectoring error code from the VMCS into the VMX
1321 * transient structure.
1322 *
1323 * @param pVCpu The cross context virtual CPU structure.
1324 * @param pVmxTransient The VMX-transient structure.
1325 */
1326DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1327{
1328 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1329 {
1330 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1333 }
1334}
1335
1336#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1337/**
1338 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1339 *
1340 * @param pVCpu The cross context virtual CPU structure.
1341 * @param pVmxTransient The VMX-transient structure.
1342 */
1343static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1344{
1345 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1346 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1347 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1348 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1349 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1350 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1351 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1352 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1353 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1354 AssertRC(rc);
1355 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1356 | HMVMX_READ_EXIT_INSTR_LEN
1357 | HMVMX_READ_EXIT_INSTR_INFO
1358 | HMVMX_READ_IDT_VECTORING_INFO
1359 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1360 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1361 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1362 | HMVMX_READ_GUEST_LINEAR_ADDR
1363 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1364}
1365#endif
1366
1367/**
1368 * Verifies that our cached values of the VMCS fields are all consistent with
1369 * what's actually present in the VMCS.
1370 *
1371 * @returns VBox status code.
1372 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1373 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1374 * VMCS content. HMCPU error-field is
1375 * updated, see VMX_VCI_XXX.
1376 * @param pVCpu The cross context virtual CPU structure.
1377 * @param pVmcsInfo The VMCS info. object.
1378 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1379 */
1380static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1381{
1382 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1383
1384 uint32_t u32Val;
1385 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1386 AssertRC(rc);
1387 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1388 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1389 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1390 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1391
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1395 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1402 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405
1406 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1407 AssertRC(rc);
1408 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1409 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1410 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1411 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1412
1413 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1414 {
1415 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1418 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421 }
1422
1423 uint64_t u64Val;
1424 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1425 {
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1429 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432 }
1433
1434 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1435 AssertRC(rc);
1436 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1437 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1438 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1439 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1440
1441 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1442 AssertRC(rc);
1443 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1444 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1445 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1446 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1447
1448 NOREF(pcszVmcs);
1449 return VINF_SUCCESS;
1450}
1451
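/*
 * Illustrative sketch (not compiled): how a caller could consume the cache check above,
 * e.g. as a sanity pass before VM-entry. The call site is hypothetical; everything
 * referenced is already used elsewhere in this file.
 *
 *     bool const fIsNstGstVmcs = false;
 *     int rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, fIsNstGstVmcs);
 *     if (rc == VERR_VMX_VMCS_FIELD_CACHE_INVALID)
 *         Log4Func(("VMCS cache mismatch: u32HMError=%#x\n", VCPU_2_VMXSTATE(pVCpu).u32HMError));
 */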
1452
1453/**
1454 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1455 * VMCS.
1456 *
1457 * This is typically required when the guest changes paging mode.
1458 *
1459 * @returns VBox status code.
1460 * @param pVCpu The cross context virtual CPU structure.
1461 * @param pVmxTransient The VMX-transient structure.
1462 *
1463 * @remarks Requires EFER.
1464 * @remarks No-long-jump zone!!!
1465 */
1466static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1467{
1468 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1469 {
1470 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1471 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1472
1473 /*
1474 * VM-entry controls.
1475 */
1476 {
1477 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1478 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1479
1480 /*
1481 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1482 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1483 *
1484 * For nested-guests, this is a mandatory VM-entry control. It's also
1485 * required because we do not want to leak host bits to the nested-guest.
1486 */
1487 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1488
1489 /*
1490 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1491 *
1492             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1493             * required to get the nested-guest working with hardware-assisted VMX execution.
1494             * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1495             * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1496             * here rather than while merging the guest VMCS controls.
1497 */
1498 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1499 {
1500 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1501 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1502 }
1503 else
1504 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1505
1506 /*
1507 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1508 *
1509             * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1510 * regardless of whether the nested-guest VMCS specifies it because we are free to
1511 * load whatever MSRs we require and we do not need to modify the guest visible copy
1512 * of the VM-entry MSR load area.
1513 */
1514 if ( g_fHmVmxSupportsVmcsEfer
1515#ifndef IN_NEM_DARWIN
1516 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1517#endif
1518 )
1519 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1520 else
1521 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1522
1523 /*
1524 * The following should -not- be set (since we're not in SMM mode):
1525 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1526 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1527 */
1528
1529 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1530 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1531
1532 if ((fVal & fZap) == fVal)
1533 { /* likely */ }
1534 else
1535 {
1536 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1537 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1538 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1539 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1540 }
1541
1542 /* Commit it to the VMCS. */
1543 if (pVmcsInfo->u32EntryCtls != fVal)
1544 {
1545 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1546 AssertRC(rc);
1547 pVmcsInfo->u32EntryCtls = fVal;
1548 }
1549 }
1550
1551 /*
1552 * VM-exit controls.
1553 */
1554 {
1555 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1556 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1557
1558 /*
1559 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1560 * supported the 1-setting of this bit.
1561 *
1562             * For nested-guests, we set the "save debug controls" control, as the corresponding
1563             * "load debug controls" control is mandatory for nested-guests anyway.
1564 */
1565 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1566
1567 /*
1568 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1569 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1570 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1571 * vmxHCExportHostMsrs().
1572 *
1573 * For nested-guests, we always set this bit as we do not support 32-bit
1574 * hosts.
1575 */
1576 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1577
1578#ifndef IN_NEM_DARWIN
1579 /*
1580             * If the VMCS EFER MSR fields are supported by the hardware, we use them.
1581 *
1582 * For nested-guests, we should use the "save IA32_EFER" control if we also
1583 * used the "load IA32_EFER" control while exporting VM-entry controls.
1584 */
1585 if ( g_fHmVmxSupportsVmcsEfer
1586 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1587 {
1588 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1589 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1590 }
1591#endif
1592
1593 /*
1594 * Enable saving of the VMX-preemption timer value on VM-exit.
1595 * For nested-guests, currently not exposed/used.
1596 */
1597 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1598 * the timer value. */
1599 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1600 {
1601 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1602 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1603 }
1604
1605 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1606 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1607
1608 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1609 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1610 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1611
1612 if ((fVal & fZap) == fVal)
1613 { /* likely */ }
1614 else
1615 {
1616 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1617 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1618 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1619 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1620 }
1621
1622 /* Commit it to the VMCS. */
1623 if (pVmcsInfo->u32ExitCtls != fVal)
1624 {
1625 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1626 AssertRC(rc);
1627 pVmcsInfo->u32ExitCtls = fVal;
1628 }
1629 }
1630
1631 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1632 }
1633 return VINF_SUCCESS;
1634}
1635
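/*
 * Illustrative sketch (not compiled) of the allowed0/allowed1 arithmetic used by the
 * function above: allowed0 holds the must-be-one bits, allowed1 the may-be-one bits.
 * The capability values below are made up for illustration only.
 *
 *     uint32_t const fAllowed0 = UINT32_C(0x000011ff);                // bits that must be set
 *     uint32_t const fAllowed1 = UINT32_C(0x0000f3ff);                // bits that may be set
 *     uint32_t       fVal      = fAllowed0 | VMX_ENTRY_CTLS_LOAD_DEBUG;
 *     uint32_t const fZap      = fAllowed1;
 *     if ((fVal & fZap) != fVal)                                      // a requested bit isn't supported
 *         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 */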
1636
1637/**
1638 * Sets the TPR threshold in the VMCS.
1639 *
1640 * @param pVCpu The cross context virtual CPU structure.
1641 * @param pVmcsInfo The VMCS info. object.
1642 * @param u32TprThreshold The TPR threshold (task-priority class only).
1643 */
1644DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1645{
1646 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1647 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1648 RT_NOREF(pVmcsInfo);
1649 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1650 AssertRC(rc);
1651}
1652
1653
1654/**
1655 * Exports the guest APIC TPR state into the VMCS.
1656 *
1657 * @param pVCpu The cross context virtual CPU structure.
1658 * @param pVmxTransient The VMX-transient structure.
1659 *
1660 * @remarks No-long-jump zone!!!
1661 */
1662static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1663{
1664 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1665 {
1666 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1667
1668 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1669 if (!pVmxTransient->fIsNestedGuest)
1670 {
1671 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1672 && APICIsEnabled(pVCpu))
1673 {
1674 /*
1675 * Setup TPR shadowing.
1676 */
1677 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1678 {
1679 bool fPendingIntr = false;
1680 uint8_t u8Tpr = 0;
1681 uint8_t u8PendingIntr = 0;
1682 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1683 AssertRC(rc);
1684
1685 /*
1686 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1687 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1688 * priority of the pending interrupt so we can deliver the interrupt. If there
1689 * are no interrupts pending, set threshold to 0 to not cause any
1690 * TPR-below-threshold VM-exits.
1691 */
1692 uint32_t u32TprThreshold = 0;
1693 if (fPendingIntr)
1694 {
1695 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1696 (which is the Task-Priority Class). */
1697 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1698 const uint8_t u8TprPriority = u8Tpr >> 4;
1699 if (u8PendingPriority <= u8TprPriority)
1700 u32TprThreshold = u8PendingPriority;
1701 }
1702
1703 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1704 }
1705 }
1706 }
1707 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1708 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1709 }
1710}
1711
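/*
 * Worked example (illustration only) of the TPR-threshold computation above: with
 * u8Tpr = 0x50 (task-priority class 5) and u8PendingIntr = 0x41 (class 4), the pending
 * class (4) is <= the TPR class (5), so the interrupt is currently masked and the
 * threshold is set to 4. Once the guest lowers its TPR below 0x40, a TPR-below-threshold
 * VM-exit fires and the pending vector 0x41 can be delivered.
 *
 *     uint8_t const  u8PendingPriority = 0x41 >> 4;    // = 4
 *     uint8_t const  u8TprPriority     = 0x50 >> 4;    // = 5
 *     uint32_t const u32TprThreshold   = u8PendingPriority <= u8TprPriority ? u8PendingPriority : 0;
 *     vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);    // writes VMX_VMCS32_CTRL_TPR_THRESHOLD
 */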
1712
1713/**
1714 * Gets the guest interruptibility-state and updates related force-flags.
1715 *
1716 * @returns Guest's interruptibility-state.
1717 * @param pVCpu The cross context virtual CPU structure.
1718 *
1719 * @remarks No-long-jump zone!!!
1720 */
1721static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1722{
1723 /*
1724 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1725 */
1726 uint32_t fIntrState = 0;
1727 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1728 {
1729 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1730 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1731
1732 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1733 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1734 {
1735 if (pCtx->eflags.Bits.u1IF)
1736 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1737 else
1738 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1739 }
1740        else /* RIP has moved past the inhibit PC; the force-flag is still set (checked by the enclosing if). */
1741 {
1742 /*
1743 * We can clear the inhibit force flag as even if we go back to the recompiler
1744 * without executing guest code in VT-x, the flag's condition to be cleared is
1745 * met and thus the cleared state is correct.
1746 */
1747 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1748 }
1749 }
1750
1751 /*
1752 * Check if we should inhibit NMI delivery.
1753 */
1754 if (CPUMIsGuestNmiBlocking(pVCpu))
1755 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1756
1757 /*
1758 * Validate.
1759 */
1760#ifdef VBOX_STRICT
1761 /* We don't support block-by-SMI yet.*/
1762 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1763
1764 /* Block-by-STI must not be set when interrupts are disabled. */
1765 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1766 {
1767 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1768 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1769 }
1770#endif
1771
1772 return fIntrState;
1773}
1774
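/*
 * Illustrative sketch (not compiled): committing the interruptibility-state returned by
 * the function above to the VMCS. The VMX_VMCS32_GUEST_INT_STATE field name is assumed
 * here and is not defined in this file.
 *
 *     uint32_t const fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
 *     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
 *     AssertRC(rc);
 */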
1775
1776/**
1777 * Exports the exception intercepts required for guest execution in the VMCS.
1778 *
1779 * @param pVCpu The cross context virtual CPU structure.
1780 * @param pVmxTransient The VMX-transient structure.
1781 *
1782 * @remarks No-long-jump zone!!!
1783 */
1784static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1785{
1786 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1787 {
1788 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1789 if ( !pVmxTransient->fIsNestedGuest
1790 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1791 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1792 else
1793 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1794
1795 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1796 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1797 }
1798}
1799
1800
1801/**
1802 * Exports the guest's RIP into the guest-state area in the VMCS.
1803 *
1804 * @param pVCpu The cross context virtual CPU structure.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1809{
1810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1811 {
1812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1813
1814 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1815 AssertRC(rc);
1816
1817 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1818 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1819 }
1820}
1821
1822
1823/**
1824 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1825 *
1826 * @param pVCpu The cross context virtual CPU structure.
1827 * @param pVmxTransient The VMX-transient structure.
1828 *
1829 * @remarks No-long-jump zone!!!
1830 */
1831static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1832{
1833 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1834 {
1835 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1836
1837 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1838 Let us assert it as such and use 32-bit VMWRITE. */
1839 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1840 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1841 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1842 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1843
1844#ifndef IN_NEM_DARWIN
1845 /*
1846 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1847 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1848 * can run the real-mode guest code under Virtual 8086 mode.
1849 */
1850 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1851 if (pVmcsInfo->RealMode.fRealOnV86Active)
1852 {
1853 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1854 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1855 Assert(!pVmxTransient->fIsNestedGuest);
1856 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1857 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1858 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1859 }
1860#else
1861 RT_NOREF(pVmxTransient);
1862#endif
1863
1864 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1865 AssertRC(rc);
1866
1867 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1868 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1869 }
1870}
1871
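/*
 * Worked example (illustration only) of the real-on-v86 eflags mangling above: a
 * real-mode guest with eflags 0x00003202 (IF=1, IOPL=3) has that value saved in
 * RealMode.Eflags, while the value written to VMX_VMCS_GUEST_RFLAGS becomes
 * 0x00020202, i.e. IOPL cleared and the VM (virtual-8086 mode) bit set.
 *
 *     X86EFLAGS fEFlags;
 *     fEFlags.u32         = UINT32_C(0x00003202);
 *     fEFlags.Bits.u1VM   = 1;    // -> 0x00023202
 *     fEFlags.Bits.u2IOPL = 0;    // -> 0x00020202
 */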
1872
1873#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1874/**
1875 * Copies the nested-guest VMCS to the shadow VMCS.
1876 *
1877 * @returns VBox status code.
1878 * @param pVCpu The cross context virtual CPU structure.
1879 * @param pVmcsInfo The VMCS info. object.
1880 *
1881 * @remarks No-long-jump zone!!!
1882 */
1883static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1884{
1885 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1886 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1887
1888 /*
1889 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1890 * current VMCS, as we may try saving guest lazy MSRs.
1891 *
1892 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1893 * calling the import VMCS code which is currently performing the guest MSR reads
1894 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1895 * and the rest of the VMX leave session machinery.
1896 */
1897 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1898
1899 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1900 if (RT_SUCCESS(rc))
1901 {
1902 /*
1903 * Copy all guest read/write VMCS fields.
1904 *
1905 * We don't check for VMWRITE failures here for performance reasons and
1906 * because they are not expected to fail, barring irrecoverable conditions
1907 * like hardware errors.
1908 */
1909 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1910 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1911 {
1912 uint64_t u64Val;
1913 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1914 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1915 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1916 }
1917
1918 /*
1919 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1920 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1921 */
1922 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1923 {
1924 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1925 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1926 {
1927 uint64_t u64Val;
1928 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1929 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1930 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1931 }
1932 }
1933
1934 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1935 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1936 }
1937
1938 ASMSetFlags(fEFlags);
1939 return rc;
1940}
1941
1942
1943/**
1944 * Copies the shadow VMCS to the nested-guest VMCS.
1945 *
1946 * @returns VBox status code.
1947 * @param pVCpu The cross context virtual CPU structure.
1948 * @param pVmcsInfo The VMCS info. object.
1949 *
1950 * @remarks Called with interrupts disabled.
1951 */
1952static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1953{
1954 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1955 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1956 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1957
1958 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1959 if (RT_SUCCESS(rc))
1960 {
1961 /*
1962 * Copy guest read/write fields from the shadow VMCS.
1963 * Guest read-only fields cannot be modified, so no need to copy them.
1964 *
1965 * We don't check for VMREAD failures here for performance reasons and
1966 * because they are not expected to fail, barring irrecoverable conditions
1967 * like hardware errors.
1968 */
1969 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1970 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1971 {
1972 uint64_t u64Val;
1973 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1974 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1975 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1976 }
1977
1978 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1979 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1980 }
1981 return rc;
1982}
1983
1984
1985/**
1986 * Enables VMCS shadowing for the given VMCS info. object.
1987 *
1988 * @param pVCpu The cross context virtual CPU structure.
1989 * @param pVmcsInfo The VMCS info. object.
1990 *
1991 * @remarks No-long-jump zone!!!
1992 */
1993static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1994{
1995 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1996 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1997 {
1998 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1999 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2000 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2001 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2002 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2003 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2004 Log4Func(("Enabled\n"));
2005 }
2006}
2007
2008
2009/**
2010 * Disables VMCS shadowing for the given VMCS info. object.
2011 *
2012 * @param pVCpu The cross context virtual CPU structure.
2013 * @param pVmcsInfo The VMCS info. object.
2014 *
2015 * @remarks No-long-jump zone!!!
2016 */
2017static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2018{
2019 /*
2020 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2021 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2022 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2023 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2024 *
2025 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2026 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2027 */
2028 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2029 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2030 {
2031 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2032 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2033 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2034 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2035 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2036 Log4Func(("Disabled\n"));
2037 }
2038}
2039#endif
2040
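/*
 * Invariant sketch (illustration only) tying the two functions above together: whenever
 * VMCS shadowing is enabled, the VMCS link pointer must point at the shadow VMCS; when it
 * is disabled, the link pointer must be NIL_RTHCPHYS.
 *
 *     Assert(   (   (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
 *                && pVmcsInfo->u64VmcsLinkPtr == pVmcsInfo->HCPhysShadowVmcs)
 *            || (  !(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
 *                && pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS));
 */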
2041
2042/**
2043 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2044 *
2045 * The guest FPU state is always pre-loaded hence we don't need to bother about
2046 * sharing FPU related CR0 bits between the guest and host.
2047 *
2048 * @returns VBox status code.
2049 * @param pVCpu The cross context virtual CPU structure.
2050 * @param pVmxTransient The VMX-transient structure.
2051 *
2052 * @remarks No-long-jump zone!!!
2053 */
2054static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2055{
2056 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2057 {
2058 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2059 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2060
2061 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2062 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2063 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2064 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2065 else
2066 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2067
2068 if (!pVmxTransient->fIsNestedGuest)
2069 {
2070 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2071 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2072 uint64_t const u64ShadowCr0 = u64GuestCr0;
2073 Assert(!RT_HI_U32(u64GuestCr0));
2074
2075 /*
2076 * Setup VT-x's view of the guest CR0.
2077 */
2078 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2079 if (VM_IS_VMX_NESTED_PAGING(pVM))
2080 {
2081 if (CPUMIsGuestPagingEnabled(pVCpu))
2082 {
2083 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2084 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2085 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2086 }
2087 else
2088 {
2089 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2090 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2091 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2092 }
2093
2094 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2095 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2096 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2097 }
2098 else
2099 {
2100 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2101 u64GuestCr0 |= X86_CR0_WP;
2102 }
2103
2104 /*
2105 * Guest FPU bits.
2106 *
2107             * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2108             * using CR0.TS.
2109             *
2110             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the
2111             * first CPUs to support VT-x; nothing is said about relaxing this for unrestricted guests (UX) in the VM-entry checks.
2112 */
2113 u64GuestCr0 |= X86_CR0_NE;
2114
2115 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2116 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2117
2118 /*
2119 * Update exception intercepts.
2120 */
2121 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2122#ifndef IN_NEM_DARWIN
2123 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2124 {
2125 Assert(PDMVmmDevHeapIsEnabled(pVM));
2126 Assert(pVM->hm.s.vmx.pRealModeTSS);
2127 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2128 }
2129 else
2130#endif
2131 {
2132 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2133 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2134 if (fInterceptMF)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2136 }
2137
2138 /* Additional intercepts for debugging, define these yourself explicitly. */
2139#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2140 uXcptBitmap |= 0
2141 | RT_BIT(X86_XCPT_BP)
2142 | RT_BIT(X86_XCPT_DE)
2143 | RT_BIT(X86_XCPT_NM)
2144 | RT_BIT(X86_XCPT_TS)
2145 | RT_BIT(X86_XCPT_UD)
2146 | RT_BIT(X86_XCPT_NP)
2147 | RT_BIT(X86_XCPT_SS)
2148 | RT_BIT(X86_XCPT_GP)
2149 | RT_BIT(X86_XCPT_PF)
2150 | RT_BIT(X86_XCPT_MF)
2151 ;
2152#elif defined(HMVMX_ALWAYS_TRAP_PF)
2153 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2154#endif
2155 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2156 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2157 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2158
2159 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2160 u64GuestCr0 |= fSetCr0;
2161 u64GuestCr0 &= fZapCr0;
2162 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2163
2164 /* Commit the CR0 and related fields to the guest VMCS. */
2165 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2166 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2167 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2168 {
2169 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2170 AssertRC(rc);
2171 }
2172 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2173 {
2174 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2175 AssertRC(rc);
2176 }
2177
2178 /* Update our caches. */
2179 pVmcsInfo->u32ProcCtls = uProcCtls;
2180 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2181
2182 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2183 }
2184 else
2185 {
2186 /*
2187 * With nested-guests, we may have extended the guest/host mask here since we
2188 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2189 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2190 * originally supplied. We must copy those bits from the nested-guest CR0 into
2191 * the nested-guest CR0 read-shadow.
2192 */
2193 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2194 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2195 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2196 Assert(!RT_HI_U32(u64GuestCr0));
2197 Assert(u64GuestCr0 & X86_CR0_NE);
2198
2199 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2200 u64GuestCr0 |= fSetCr0;
2201 u64GuestCr0 &= fZapCr0;
2202 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2203
2204 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2205 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2206 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2207
2208 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2209 }
2210
2211 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2212 }
2213
2214 return VINF_SUCCESS;
2215}
2216
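/*
 * Worked example (illustration only) of the CR0 fixed-bits arithmetic above, using the
 * capability values commonly reported by VT-x hardware (they may differ on other CPUs):
 * IA32_VMX_CR0_FIXED0 = 0x80000021 (PG|NE|PE) and IA32_VMX_CR0_FIXED1 = 0xffffffff.
 * With unrestricted guest execution, PE and PG are stripped from the must-be-one set,
 * leaving only NE mandatory; without it, PE|PG stay forced on in the VMCS copy while the
 * read-shadow keeps the guest's own value.
 *
 *     uint64_t fSetCr0 = UINT64_C(0x80000021);                    // fixed0: must-be-one bits
 *     uint64_t fZapCr0 = UINT64_C(0xffffffff);                    // fixed1: may-be-one bits
 *     if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
 *         fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);        // -> 0x20 (NE only)
 *     uint64_t u64GuestCr0 = (pVCpu->cpum.GstCtx.cr0 | fSetCr0) & fZapCr0;
 */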
2217
2218/**
2219 * Exports the guest control registers (CR3, CR4) into the guest-state area
2220 * in the VMCS.
2221 *
2222 * @returns VBox strict status code.
2223 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2224 * without unrestricted guest access and the VMMDev is not presently
2225 * mapped (e.g. EFI32).
2226 *
2227 * @param pVCpu The cross context virtual CPU structure.
2228 * @param pVmxTransient The VMX-transient structure.
2229 *
2230 * @remarks No-long-jump zone!!!
2231 */
2232static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2233{
2234 int rc = VINF_SUCCESS;
2235 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2236
2237 /*
2238 * Guest CR2.
2239 * It's always loaded in the assembler code. Nothing to do here.
2240 */
2241
2242 /*
2243 * Guest CR3.
2244 */
2245 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2246 {
2247 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2248
2249 if (VM_IS_VMX_NESTED_PAGING(pVM))
2250 {
2251#ifndef IN_NEM_DARWIN
2252 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2253 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2254
2255 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2256 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2257 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2258 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2259
2260 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2261 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2262 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2263
2264 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2265 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2266 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2267 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2268 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2269 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2270 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2271
2272 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2273 AssertRC(rc);
2274#endif
2275
2276 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2277 uint64_t u64GuestCr3 = pCtx->cr3;
2278 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2279 || CPUMIsGuestPagingEnabledEx(pCtx))
2280 {
2281 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2282 if (CPUMIsGuestInPAEModeEx(pCtx))
2283 {
2284 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2285 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2286 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2287 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2288 }
2289
2290 /*
2291                 * With nested paging, the guest's view of its CR3 is left untouched when the guest
2292                 * is using paging, or when we rely on unrestricted guest execution to handle the
2293                 * guest while it's not using paging.
2294 */
2295 }
2296#ifndef IN_NEM_DARWIN
2297 else
2298 {
2299 /*
2300 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2301 * thinks it accesses physical memory directly, we use our identity-mapped
2302 * page table to map guest-linear to guest-physical addresses. EPT takes care
2303 * of translating it to host-physical addresses.
2304 */
2305 RTGCPHYS GCPhys;
2306 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2307
2308 /* We obtain it here every time as the guest could have relocated this PCI region. */
2309 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2310 if (RT_SUCCESS(rc))
2311 { /* likely */ }
2312 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2313 {
2314 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2315 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2316 }
2317 else
2318 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2319
2320 u64GuestCr3 = GCPhys;
2321 }
2322#endif
2323
2324 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2325 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2326 AssertRC(rc);
2327 }
2328 else
2329 {
2330 Assert(!pVmxTransient->fIsNestedGuest);
2331 /* Non-nested paging case, just use the hypervisor's CR3. */
2332 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2333
2334 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2335 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2336 AssertRC(rc);
2337 }
2338
2339 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2340 }
2341
2342 /*
2343 * Guest CR4.
2344     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2345 */
2346 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2347 {
2348 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2349 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2350
2351 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2352 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2353
2354 /*
2355 * With nested-guests, we may have extended the guest/host mask here (since we
2356 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2357 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2358 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2359 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2360 */
2361 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2362 uint64_t u64GuestCr4 = pCtx->cr4;
2363 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2364 ? pCtx->cr4
2365 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2366 Assert(!RT_HI_U32(u64GuestCr4));
2367
2368#ifndef IN_NEM_DARWIN
2369 /*
2370 * Setup VT-x's view of the guest CR4.
2371 *
2372 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2373 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2374 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2375 *
2376 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2377 */
2378 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2379 {
2380 Assert(pVM->hm.s.vmx.pRealModeTSS);
2381 Assert(PDMVmmDevHeapIsEnabled(pVM));
2382 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2383 }
2384#endif
2385
2386 if (VM_IS_VMX_NESTED_PAGING(pVM))
2387 {
2388 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2389 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2390 {
2391 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2392 u64GuestCr4 |= X86_CR4_PSE;
2393 /* Our identity mapping is a 32-bit page directory. */
2394 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2395 }
2396 /* else use guest CR4.*/
2397 }
2398 else
2399 {
2400 Assert(!pVmxTransient->fIsNestedGuest);
2401
2402 /*
2403             * The shadow paging mode and the guest paging mode can differ: the shadow follows the host
2404             * paging mode, so we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2405 */
2406 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2407 {
2408 case PGMMODE_REAL: /* Real-mode. */
2409 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2410 case PGMMODE_32_BIT: /* 32-bit paging. */
2411 {
2412 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2413 break;
2414 }
2415
2416 case PGMMODE_PAE: /* PAE paging. */
2417 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2418 {
2419 u64GuestCr4 |= X86_CR4_PAE;
2420 break;
2421 }
2422
2423 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2424 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2425 {
2426#ifdef VBOX_WITH_64_BITS_GUESTS
2427 /* For our assumption in vmxHCShouldSwapEferMsr. */
2428 Assert(u64GuestCr4 & X86_CR4_PAE);
2429 break;
2430#endif
2431 }
2432 default:
2433 AssertFailed();
2434 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2435 }
2436 }
2437
2438 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2439 u64GuestCr4 |= fSetCr4;
2440 u64GuestCr4 &= fZapCr4;
2441
2442 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2443 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2444 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2445
2446#ifndef IN_NEM_DARWIN
2447 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2448 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2449 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2450 {
2451 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2452 hmR0VmxUpdateStartVmFunction(pVCpu);
2453 }
2454#endif
2455
2456 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2457
2458 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2459 }
2460 return rc;
2461}
2462
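/*
 * Worked example (illustration only) of the EPTP construction above: with a 4-level EPT
 * page walk and write-back memory type, the low 12 bits of the EPTP come out as 0x1e
 * (memtype WB = 6 in bits 2:0, page-walk length minus 1 = 3 in bits 5:3, A/D bit clear).
 * The root table address below is made up.
 *
 *     RTHCPHYS HCPhysEPTP = UINT64_C(0x0000000123456000);
 *     HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
 *                |  RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
 *     Assert(((HCPhysEPTP >> 3) & 0x07) == 3);    // matches the VMCS-controls check above
 */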
2463
2464#ifdef VBOX_STRICT
2465/**
2466 * Strict function to validate segment registers.
2467 *
2468 * @param pVCpu The cross context virtual CPU structure.
2469 * @param pVmcsInfo The VMCS info. object.
2470 *
2471 * @remarks Will import guest CR0 on strict builds during validation of
2472 * segments.
2473 */
2474static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2475{
2476 /*
2477 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2478 *
2479 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2480 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2481 * unusable bit and doesn't change the guest-context value.
2482 */
2483 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2484 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2485 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2486 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2487 && ( !CPUMIsGuestInRealModeEx(pCtx)
2488 && !CPUMIsGuestInV86ModeEx(pCtx)))
2489 {
2490 /* Protected mode checks */
2491 /* CS */
2492 Assert(pCtx->cs.Attr.n.u1Present);
2493 Assert(!(pCtx->cs.Attr.u & 0xf00));
2494 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2495 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2496 || !(pCtx->cs.Attr.n.u1Granularity));
2497 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2498 || (pCtx->cs.Attr.n.u1Granularity));
2499 /* CS cannot be loaded with NULL in protected mode. */
2500 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2501 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2502 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2503 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2504 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2505 else
2506            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2507 /* SS */
2508 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2509 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2510 if ( !(pCtx->cr0 & X86_CR0_PE)
2511 || pCtx->cs.Attr.n.u4Type == 3)
2512 {
2513 Assert(!pCtx->ss.Attr.n.u2Dpl);
2514 }
2515 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2516 {
2517 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2518 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2519 Assert(pCtx->ss.Attr.n.u1Present);
2520 Assert(!(pCtx->ss.Attr.u & 0xf00));
2521 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2522 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2523 || !(pCtx->ss.Attr.n.u1Granularity));
2524 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2525 || (pCtx->ss.Attr.n.u1Granularity));
2526 }
2527 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2528 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->ds.Attr.n.u1Present);
2532 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->ds.Attr.u & 0xf00));
2534 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->ds.Attr.n.u1Granularity));
2537 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2538 || (pCtx->ds.Attr.n.u1Granularity));
2539 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->es.Attr.n.u1Present);
2546 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->es.Attr.u & 0xf00));
2548 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->es.Attr.n.u1Granularity));
2551 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2552 || (pCtx->es.Attr.n.u1Granularity));
2553 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->fs.Attr.n.u1Present);
2560 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->fs.Attr.u & 0xf00));
2562 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->fs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2566 || (pCtx->fs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2571 {
2572 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2573 Assert(pCtx->gs.Attr.n.u1Present);
2574 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2575 Assert(!(pCtx->gs.Attr.u & 0xf00));
2576 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2577 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2578 || !(pCtx->gs.Attr.n.u1Granularity));
2579 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2580 || (pCtx->gs.Attr.n.u1Granularity));
2581 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2582 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2583 }
2584 /* 64-bit capable CPUs. */
2585 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2586 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2587 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2588 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2589 }
2590 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2591 || ( CPUMIsGuestInRealModeEx(pCtx)
2592 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2593 {
2594 /* Real and v86 mode checks. */
2595        /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
2596 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2597#ifndef IN_NEM_DARWIN
2598 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2599 {
2600 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2601 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2602 }
2603 else
2604#endif
2605 {
2606 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2607 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2608 }
2609
2610 /* CS */
2611        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2612 Assert(pCtx->cs.u32Limit == 0xffff);
2613 Assert(u32CSAttr == 0xf3);
2614 /* SS */
2615 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2616 Assert(pCtx->ss.u32Limit == 0xffff);
2617 Assert(u32SSAttr == 0xf3);
2618 /* DS */
2619 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2620 Assert(pCtx->ds.u32Limit == 0xffff);
2621 Assert(u32DSAttr == 0xf3);
2622 /* ES */
2623 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2624 Assert(pCtx->es.u32Limit == 0xffff);
2625 Assert(u32ESAttr == 0xf3);
2626 /* FS */
2627 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2628 Assert(pCtx->fs.u32Limit == 0xffff);
2629 Assert(u32FSAttr == 0xf3);
2630 /* GS */
2631 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2632 Assert(pCtx->gs.u32Limit == 0xffff);
2633 Assert(u32GSAttr == 0xf3);
2634 /* 64-bit capable CPUs. */
2635 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2636 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2637 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2638 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2639 }
2640}
2641#endif /* VBOX_STRICT */
2642
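/*
 * Worked example (illustration only) of the real/v86-mode segment invariants asserted
 * above: a real-mode selector of 0x1234 implies base = 0x12340 (Sel << 4), a limit of
 * 0xffff and access rights 0xf3 (present, DPL 3, accessed read/write data segment).
 *
 *     uint16_t const uSel    = 0x1234;
 *     uint64_t const u64Base = (uint64_t)uSel << 4;    // 0x12340
 *     Assert(u64Base == UINT64_C(0x12340));
 */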
2643
2644/**
2645 * Exports a guest segment register into the guest-state area in the VMCS.
2646 *
2647 * @returns VBox status code.
2648 * @param pVCpu The cross context virtual CPU structure.
2649 * @param pVmcsInfo The VMCS info. object.
2650 * @param iSegReg The segment register number (X86_SREG_XXX).
2651 * @param pSelReg Pointer to the segment selector.
2652 *
2653 * @remarks No-long-jump zone!!!
2654 */
2655static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2656{
2657 Assert(iSegReg < X86_SREG_COUNT);
2658
2659 uint32_t u32Access = pSelReg->Attr.u;
2660#ifndef IN_NEM_DARWIN
2661 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2662#endif
2663 {
2664 /*
2665 * The way to differentiate between whether this is really a null selector or was just
2666 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2667 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2668         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2669         * that NULL selectors loaded in protected-mode have their attributes set to 0.
2670 */
2671 if (u32Access)
2672 { }
2673 else
2674 u32Access = X86DESCATTR_UNUSABLE;
2675 }
2676#ifndef IN_NEM_DARWIN
2677 else
2678 {
2679 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2680 u32Access = 0xf3;
2681 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2682 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2683 RT_NOREF_PV(pVCpu);
2684 }
2685#else
2686 RT_NOREF(pVmcsInfo);
2687#endif
2688
2689 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2690 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2691              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2692
2693 /*
2694 * Commit it to the VMCS.
2695 */
2696 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2697 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2698 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2699 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2700 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2701 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2702 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2703 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2704 return VINF_SUCCESS;
2705}
2706
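/*
 * Illustrative sketch (not compiled): exporting a NULL selector with the function above.
 * The local variable is hypothetical; when the attributes are 0, the function writes
 * X86DESCATTR_UNUSABLE to the access-rights field while leaving the guest context alone.
 *
 *     CPUMSELREG NullSel;
 *     RT_ZERO(NullSel);                    // Sel = 0, Attr.u = 0  =>  unusable in the VMCS
 *     int rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &NullSel);
 *     AssertRC(rc);
 */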
2707
2708/**
2709 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2710 * area in the VMCS.
2711 *
2712 * @returns VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure.
2714 * @param pVmxTransient The VMX-transient structure.
2715 *
2716 * @remarks Will import guest CR0 on strict builds during validation of
2717 * segments.
2718 * @remarks No-long-jump zone!!!
2719 */
2720static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2721{
2722 int rc = VERR_INTERNAL_ERROR_5;
2723#ifndef IN_NEM_DARWIN
2724 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2725#endif
2726 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2727 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2728#ifndef IN_NEM_DARWIN
2729 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2730#endif
2731
2732 /*
2733 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2734 */
2735 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2736 {
2737 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2738 {
2739 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2740#ifndef IN_NEM_DARWIN
2741 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2742 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2743#endif
2744 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2745 AssertRC(rc);
2746 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2747 }
2748
2749 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2750 {
2751 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2752#ifndef IN_NEM_DARWIN
2753 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2754 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2755#endif
2756 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2757 AssertRC(rc);
2758 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2759 }
2760
2761 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2762 {
2763 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2764#ifndef IN_NEM_DARWIN
2765 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2766 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2767#endif
2768 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2769 AssertRC(rc);
2770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2771 }
2772
2773 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2774 {
2775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2776#ifndef IN_NEM_DARWIN
2777 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2778 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2779#endif
2780 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2781 AssertRC(rc);
2782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2783 }
2784
2785 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2786 {
2787 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2788#ifndef IN_NEM_DARWIN
2789 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2790 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2791#endif
2792 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2793 AssertRC(rc);
2794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2795 }
2796
2797 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2798 {
2799 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2800#ifndef IN_NEM_DARWIN
2801 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2802 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2803#endif
2804 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2805 AssertRC(rc);
2806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2807 }
2808
2809#ifdef VBOX_STRICT
2810 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2811#endif
2812 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2813 pCtx->cs.Attr.u));
2814 }
2815
2816 /*
2817 * Guest TR.
2818 */
2819 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2820 {
2821 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2822
2823 /*
2824 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2825 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2826 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2827 */
2828 uint16_t u16Sel;
2829 uint32_t u32Limit;
2830 uint64_t u64Base;
2831 uint32_t u32AccessRights;
2832#ifndef IN_NEM_DARWIN
2833 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2834#endif
2835 {
2836 u16Sel = pCtx->tr.Sel;
2837 u32Limit = pCtx->tr.u32Limit;
2838 u64Base = pCtx->tr.u64Base;
2839 u32AccessRights = pCtx->tr.Attr.u;
2840 }
2841#ifndef IN_NEM_DARWIN
2842 else
2843 {
2844 Assert(!pVmxTransient->fIsNestedGuest);
2845 Assert(pVM->hm.s.vmx.pRealModeTSS);
2846 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2847
2848 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2849 RTGCPHYS GCPhys;
2850 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2851 AssertRCReturn(rc, rc);
2852
2853 X86DESCATTR DescAttr;
2854 DescAttr.u = 0;
2855 DescAttr.n.u1Present = 1;
2856 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2857
2858 u16Sel = 0;
2859 u32Limit = HM_VTX_TSS_SIZE;
2860 u64Base = GCPhys;
2861 u32AccessRights = DescAttr.u;
2862 }
2863#endif
2864
2865 /* Validate. */
2866 Assert(!(u16Sel & RT_BIT(2)));
2867 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2868 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2869 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2870 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2871 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2872 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2873 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2874 Assert( (u32Limit & 0xfff) == 0xfff
2875 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2876 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2877 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2878
2879 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2880 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2881 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2882 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2883
2884 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2885 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2886 }
2887
2888 /*
2889 * Guest GDTR.
2890 */
2891 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2892 {
2893 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2894
2895 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2896 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2897
2898 /* Validate. */
2899 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2900
2901 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2902 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2903 }
2904
2905 /*
2906 * Guest LDTR.
2907 */
2908 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2909 {
2910 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2911
2912        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2913 uint32_t u32Access;
2914 if ( !pVmxTransient->fIsNestedGuest
2915 && !pCtx->ldtr.Attr.u)
2916 u32Access = X86DESCATTR_UNUSABLE;
2917 else
2918 u32Access = pCtx->ldtr.Attr.u;
2919
2920 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2921 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2922 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2923 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2924
2925 /* Validate. */
2926 if (!(u32Access & X86DESCATTR_UNUSABLE))
2927 {
2928 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2929 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2930 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2931 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2932 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2933 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2934 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2935 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2936 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2937 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2938 }
2939
2940 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2941 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2942 }
2943
2944 /*
2945 * Guest IDTR.
2946 */
2947 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2948 {
2949 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2950
2951 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2952 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2953
2954 /* Validate. */
2955 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2956
2957 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2958 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2959 }
2960
2961 return VINF_SUCCESS;
2962}
2963
2964
2965/**
2966 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2967 * VM-exit interruption info type.
2968 *
2969 * @returns The IEM exception flags.
2970 * @param uVector The event vector.
2971 * @param uVmxEventType The VMX event type.
2972 *
2973 * @remarks This function currently only constructs flags required for
2974  *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2975 * and CR2 aspects of an exception are not included).
2976 */
2977static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2978{
2979 uint32_t fIemXcptFlags;
2980 switch (uVmxEventType)
2981 {
2982 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2983 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2985 break;
2986
2987 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2989 break;
2990
2991 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2992 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2993 break;
2994
2995 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2996 {
2997 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2998 if (uVector == X86_XCPT_BP)
2999 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3000 else if (uVector == X86_XCPT_OF)
3001 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3002 else
3003 {
3004 fIemXcptFlags = 0;
3005 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3006 }
3007 break;
3008 }
3009
3010 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3011 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3012 break;
3013
3014 default:
3015 fIemXcptFlags = 0;
3016 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3017 break;
3018 }
3019 return fIemXcptFlags;
3020}
3021
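/*
 * For illustration (follows directly from the switch above): an IDT-vectoring
 * type of VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT with uVector == X86_XCPT_BP (INT3)
 * maps to IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR, whereas a
 * hardware exception or NMI maps to just IEM_XCPT_FLAGS_T_CPU_XCPT.
 */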
3022
3023/**
3024 * Sets an event as a pending event to be injected into the guest.
3025 *
3026 * @param pVCpu The cross context virtual CPU structure.
3027 * @param u32IntInfo The VM-entry interruption-information field.
3028 * @param cbInstr The VM-entry instruction length in bytes (for
3029 * software interrupts, exceptions and privileged
3030 * software exceptions).
3031 * @param u32ErrCode The VM-entry exception error code.
3032 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3033 * page-fault.
3034 */
3035DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3036 RTGCUINTPTR GCPtrFaultAddress)
3037{
3038 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3039 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3040 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3041 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3042 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3043 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3044}
3045
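/*
 * Layout of the interruption-information value stored above, see Intel spec.
 * 24.8.3 "VM-Entry Controls for Event Injection": bits 7:0 hold the vector,
 * bits 10:8 the event type, bit 11 the deliver-error-code flag and bit 31 the
 * valid flag. For example, an NMI (vector 2, type 2) encodes as 0x80000202 and
 * a #DF (vector 8, type 3, error code valid) as 0x80000b08; the helpers below
 * construct exactly these values using RT_BF_MAKE.
 */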
3046
3047/**
3048 * Sets an external interrupt as pending-for-injection into the VM.
3049 *
3050 * @param pVCpu The cross context virtual CPU structure.
3051 * @param u8Interrupt The external interrupt vector.
3052 */
3053DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3054{
3055 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3057 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3058 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3059 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3060}
3061
3062
3063/**
3064 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3065 *
3066 * @param pVCpu The cross context virtual CPU structure.
3067 */
3068DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3069{
3070 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3072 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3073 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3074 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3075}
3076
3077
3078/**
3079 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3080 *
3081 * @param pVCpu The cross context virtual CPU structure.
3082 */
3083DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3084{
3085 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3087 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3088 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3089 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3090}
3091
3092
3093/**
3094 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3095 *
3096 * @param pVCpu The cross context virtual CPU structure.
3097 */
3098DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3099{
3100 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3103 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3104 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3105}
3106
3107
3108/**
3109 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3110 *
3111 * @param pVCpu The cross context virtual CPU structure.
3112 */
3113DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3114{
3115 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3119 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3120}
3121
3122
3123#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3124/**
3125 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3126 *
3127 * @param pVCpu The cross context virtual CPU structure.
3128 * @param u32ErrCode The error code for the general-protection exception.
3129 */
3130DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3131{
3132 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3134 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3135 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3136 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3137}
3138
3139
3140/**
3141 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3142 *
3143 * @param pVCpu The cross context virtual CPU structure.
3144 * @param u32ErrCode The error code for the stack exception.
3145 */
3146DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3147{
3148 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3149 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3150 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3151 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3152 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3153}
3154#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3155
3156
3157/**
3158 * Fixes up attributes for the specified segment register.
3159 *
3160 * @param pVCpu The cross context virtual CPU structure.
3161 * @param pSelReg The segment register that needs fixing.
3162 * @param pszRegName The register name (for logging and assertions).
3163 */
3164static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3165{
3166 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3167
3168 /*
3169 * If VT-x marks the segment as unusable, most other bits remain undefined:
3170 * - For CS the L, D and G bits have meaning.
3171 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3172 * - For the remaining data segments no bits are defined.
3173 *
3174     * The present bit and the unusable bit have been observed to be set at the
3175 * same time (the selector was supposed to be invalid as we started executing
3176 * a V8086 interrupt in ring-0).
3177 *
3178 * What should be important for the rest of the VBox code, is that the P bit is
3179 * cleared. Some of the other VBox code recognizes the unusable bit, but
3180     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3181 * safe side here, we'll strip off P and other bits we don't care about. If
3182 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3183 *
3184 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3185 */
3186#ifdef VBOX_STRICT
3187 uint32_t const uAttr = pSelReg->Attr.u;
3188#endif
3189
3190 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3191 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3192 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3193
3194#ifdef VBOX_STRICT
3195# ifndef IN_NEM_DARWIN
3196 VMMRZCallRing3Disable(pVCpu);
3197# endif
3198 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3199# ifdef DEBUG_bird
3200 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3201 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3202 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3203# endif
3204# ifndef IN_NEM_DARWIN
3205 VMMRZCallRing3Enable(pVCpu);
3206# endif
3207 NOREF(uAttr);
3208#endif
3209 RT_NOREF2(pVCpu, pszRegName);
3210}
3211
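/*
 * Worked example of the masking above (using the standard descriptor-attribute
 * bit values; the concrete numbers are purely illustrative): an unusable data
 * segment returned by VT-x with Attr.u = 0x1c093 (unusable | G | D | P | S |
 * type 3) becomes 0x1c013: the present bit (0x80) is stripped while the
 * unusable bit (0x10000), G, D and the type are retained.
 */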
3212
3213/**
3214 * Imports a guest segment register from the current VMCS into the guest-CPU
3215 * context.
3216 *
3217 * @param pVCpu The cross context virtual CPU structure.
3218 * @param iSegReg The segment register number (X86_SREG_XXX).
3219 *
3220 * @remarks Called with interrupts and/or preemption disabled.
3221 */
3222static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3223{
3224 Assert(iSegReg < X86_SREG_COUNT);
3225 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3226 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3227 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3228 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3229
3230 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3231
3232 uint16_t u16Sel;
3233 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3234 pSelReg->Sel = u16Sel;
3235 pSelReg->ValidSel = u16Sel;
3236
3237 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3238 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3239
3240 uint32_t u32Attr;
3241 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3242 pSelReg->Attr.u = u32Attr;
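    /* Note: the string literal below packs the six segment names at a fixed
       3-byte stride (two characters + a NUL terminator), so adding iSegReg * 3
       points at the correct NUL-terminated name for the log message. */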
3243 if (u32Attr & X86DESCATTR_UNUSABLE)
3244 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3245
3246 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3247}
3248
3249
3250/**
3251 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3252 *
3253 * @param pVCpu The cross context virtual CPU structure.
3254 *
3255 * @remarks Called with interrupts and/or preemption disabled.
3256 */
3257static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3258{
3259 uint16_t u16Sel;
3260 uint64_t u64Base;
3261 uint32_t u32Limit, u32Attr;
3262 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3263 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3264 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3265 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3266
3267 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3268 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3269 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3270 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3271 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3272 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3273 if (u32Attr & X86DESCATTR_UNUSABLE)
3274 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3275}
3276
3277
3278/**
3279 * Imports the guest TR from the current VMCS into the guest-CPU context.
3280 *
3281 * @param pVCpu The cross context virtual CPU structure.
3282 *
3283 * @remarks Called with interrupts and/or preemption disabled.
3284 */
3285static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3286{
3287 uint16_t u16Sel;
3288 uint64_t u64Base;
3289 uint32_t u32Limit, u32Attr;
3290 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3291 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3292 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3293 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3294
3295 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3296 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3297 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3298 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3299 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3300 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3301 /* TR is the only selector that can never be unusable. */
3302 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3303}
3304
3305
3306/**
3307 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3308 *
3309 * @param pVCpu The cross context virtual CPU structure.
3310 *
3311 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3312 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3313 * instead!!!
3314 */
3315static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3316{
3317 uint64_t u64Val;
3318 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3319 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3320 {
3321 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3322 AssertRC(rc);
3323
3324 pCtx->rip = u64Val;
3325 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3326 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3327 }
3328}
3329
3330
3331/**
3332 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3333 *
3334 * @param pVCpu The cross context virtual CPU structure.
3335 * @param pVmcsInfo The VMCS info. object.
3336 *
3337 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3338 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3339 * instead!!!
3340 */
3341static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3342{
3343 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3344 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3345 {
3346 uint64_t u64Val;
3347 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3348 AssertRC(rc);
3349
3350 pCtx->rflags.u64 = u64Val;
3351#ifndef IN_NEM_DARWIN
3352 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3353 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3354 {
3355 pCtx->eflags.Bits.u1VM = 0;
3356 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3357 }
3358#else
3359 RT_NOREF(pVmcsInfo);
3360#endif
3361 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3362 }
3363}
3364
3365
3366/**
3367 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3368 * context.
3369 *
3370 * @param pVCpu The cross context virtual CPU structure.
3371 * @param pVmcsInfo The VMCS info. object.
3372 *
3373 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3374 * do not log!
3375 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3376 * instead!!!
3377 */
3378static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3379{
3380 uint32_t u32Val;
3381 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3382 if (!u32Val)
3383 {
3384 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3385 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3386 CPUMSetGuestNmiBlocking(pVCpu, false);
3387 }
3388 else
3389 {
3390 /*
3391 * We must import RIP here to set our EM interrupt-inhibited state.
3392 * We also import RFLAGS as our code that evaluates pending interrupts
3393 * before VM-entry requires it.
3394 */
3395 vmxHCImportGuestRip(pVCpu);
3396 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3397
3398 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3399 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3400 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3401 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3402
3403 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3404 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3405 }
3406}
3407
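/*
 * For reference, the interruptibility-state bits tested above are: bit 0 =
 * blocking by STI, bit 1 = blocking by MOV SS, bit 2 = blocking by SMI and
 * bit 3 = blocking by NMI, matching the VMX_VMCS_GUEST_INT_STATE_BLOCK_XXX
 * masks used in the code. A raw value of 0 thus means no inhibition at all,
 * which is the fast path taken first above.
 */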
3408
3409/**
3410 * Worker for VMXR0ImportStateOnDemand.
3411 *
3412 * @returns VBox status code.
3413 * @param pVCpu The cross context virtual CPU structure.
3414 * @param pVmcsInfo The VMCS info. object.
3415 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3416 */
3417static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3418{
3419 int rc = VINF_SUCCESS;
3420 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3421 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3422 uint32_t u32Val;
3423
3424 /*
3425     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3426 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3427 * neither are other host platforms.
3428 *
3429 * Committing this temporarily as it prevents BSOD.
3430 *
3431 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3432 */
3433# ifdef RT_OS_WINDOWS
3434 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3435 return VERR_HM_IPE_1;
3436# endif
3437
3438 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3439
3440#ifndef IN_NEM_DARWIN
3441 /*
3442 * We disable interrupts to make the updating of the state and in particular
3443 * the fExtrn modification atomic wrt to preemption hooks.
3444 */
3445 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3446#endif
3447
3448 fWhat &= pCtx->fExtrn;
3449 if (fWhat)
3450 {
3451 do
3452 {
3453 if (fWhat & CPUMCTX_EXTRN_RIP)
3454 vmxHCImportGuestRip(pVCpu);
3455
3456 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3457 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3458
3459 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3460 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3461
3462 if (fWhat & CPUMCTX_EXTRN_RSP)
3463 {
3464 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3465 AssertRC(rc);
3466 }
3467
3468 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3469 {
3470 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3471#ifndef IN_NEM_DARWIN
3472 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3473#else
3474 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3475#endif
3476 if (fWhat & CPUMCTX_EXTRN_CS)
3477 {
3478 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3479 vmxHCImportGuestRip(pVCpu);
3480 if (fRealOnV86Active)
3481 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3482 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3483 }
3484 if (fWhat & CPUMCTX_EXTRN_SS)
3485 {
3486 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3487 if (fRealOnV86Active)
3488 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3489 }
3490 if (fWhat & CPUMCTX_EXTRN_DS)
3491 {
3492 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3493 if (fRealOnV86Active)
3494 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3495 }
3496 if (fWhat & CPUMCTX_EXTRN_ES)
3497 {
3498 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3499 if (fRealOnV86Active)
3500 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3501 }
3502 if (fWhat & CPUMCTX_EXTRN_FS)
3503 {
3504 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3505 if (fRealOnV86Active)
3506 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_GS)
3509 {
3510 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3511 if (fRealOnV86Active)
3512 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3513 }
3514 }
3515
3516 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3517 {
3518 if (fWhat & CPUMCTX_EXTRN_LDTR)
3519 vmxHCImportGuestLdtr(pVCpu);
3520
3521 if (fWhat & CPUMCTX_EXTRN_GDTR)
3522 {
3523 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3524 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3525 pCtx->gdtr.cbGdt = u32Val;
3526 }
3527
3528 /* Guest IDTR. */
3529 if (fWhat & CPUMCTX_EXTRN_IDTR)
3530 {
3531 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3532 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3533 pCtx->idtr.cbIdt = u32Val;
3534 }
3535
3536 /* Guest TR. */
3537 if (fWhat & CPUMCTX_EXTRN_TR)
3538 {
3539#ifndef IN_NEM_DARWIN
3540 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3541 don't need to import that one. */
3542 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3543#endif
3544 vmxHCImportGuestTr(pVCpu);
3545 }
3546 }
3547
3548 if (fWhat & CPUMCTX_EXTRN_DR7)
3549 {
3550#ifndef IN_NEM_DARWIN
3551 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3552#endif
3553 {
3554 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3555 AssertRC(rc);
3556 }
3557 }
3558
3559 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3560 {
3561 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3562 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3563 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3564 pCtx->SysEnter.cs = u32Val;
3565 }
3566
3567#ifndef IN_NEM_DARWIN
3568 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3569 {
3570 if ( pVM->hmr0.s.fAllow64BitGuests
3571 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3572 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3573 }
3574
3575 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3576 {
3577 if ( pVM->hmr0.s.fAllow64BitGuests
3578 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3579 {
3580 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3581 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3582 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3583 }
3584 }
3585
3586 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3587 {
3588 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3589 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3590 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3591 Assert(pMsrs);
3592 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3593 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3594 for (uint32_t i = 0; i < cMsrs; i++)
3595 {
3596 uint32_t const idMsr = pMsrs[i].u32Msr;
3597 switch (idMsr)
3598 {
3599 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3600 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3601 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3602 default:
3603 {
3604 uint32_t idxLbrMsr;
3605 if (VM_IS_VMX_LBR(pVM))
3606 {
3607 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3608 {
3609 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3610 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3611 break;
3612 }
3613 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3614 {
3615 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3616 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3617 break;
3618 }
3619 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3620 {
3621 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3622 break;
3623 }
3624 /* Fallthru (no break) */
3625 }
3626 pCtx->fExtrn = 0;
3627 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3628 ASMSetFlags(fEFlags);
3629 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3630 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3631 }
3632 }
3633 }
3634 }
3635#endif
3636
3637 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3638 {
3639 if (fWhat & CPUMCTX_EXTRN_CR0)
3640 {
3641 uint64_t u64Cr0;
3642 uint64_t u64Shadow;
3643 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3644 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3645#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3646 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3647 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3648#else
3649 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3650 {
3651 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3652 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3653 }
3654 else
3655 {
3656 /*
3657 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3658 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3659 * re-construct CR0. See @bugref{9180#c95} for details.
3660 */
3661 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3662 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3663 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3664 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3665 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3666 }
3667#endif
3668#ifndef IN_NEM_DARWIN
3669 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3670#endif
3671 CPUMSetGuestCR0(pVCpu, u64Cr0);
3672#ifndef IN_NEM_DARWIN
3673 VMMRZCallRing3Enable(pVCpu);
3674#endif
3675 }
3676
3677 if (fWhat & CPUMCTX_EXTRN_CR4)
3678 {
3679 uint64_t u64Cr4;
3680 uint64_t u64Shadow;
3681 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3682 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3683#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3684 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3685 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3686#else
3687 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3688 {
3689 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3690 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3691 }
3692 else
3693 {
3694 /*
3695 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3696 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3697 * re-construct CR4. See @bugref{9180#c95} for details.
3698 */
3699 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3700 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3701 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3702 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3703 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3704 }
3705#endif
3706 pCtx->cr4 = u64Cr4;
3707 }
3708
3709 if (fWhat & CPUMCTX_EXTRN_CR3)
3710 {
3711 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3712 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3713 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3714 && CPUMIsGuestPagingEnabledEx(pCtx)))
3715 {
3716 uint64_t u64Cr3;
3717 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3718 if (pCtx->cr3 != u64Cr3)
3719 {
3720 pCtx->cr3 = u64Cr3;
3721 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3722 }
3723
3724 /*
3725 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3726 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3727 */
3728 if (CPUMIsGuestInPAEModeEx(pCtx))
3729 {
3730 X86PDPE aPaePdpes[4];
3731 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3732 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3733 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3734 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3735 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3736 {
3737 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3738 /* PGM now updates PAE PDPTEs while updating CR3. */
3739 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3740 }
3741 }
3742 }
3743 }
3744 }
3745
3746#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3747 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3748 {
3749 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3750 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3751 {
3752 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3753 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3754 if (RT_SUCCESS(rc))
3755 { /* likely */ }
3756 else
3757 break;
3758 }
3759 }
3760#endif
3761 } while (0);
3762
3763 if (RT_SUCCESS(rc))
3764 {
3765 /* Update fExtrn. */
3766 pCtx->fExtrn &= ~fWhat;
3767
3768 /* If everything has been imported, clear the HM keeper bit. */
3769 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3770 {
3771#ifndef IN_NEM_DARWIN
3772 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3773#else
3774 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3775#endif
3776 Assert(!pCtx->fExtrn);
3777 }
3778 }
3779 }
3780#ifndef IN_NEM_DARWIN
3781 else
3782 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3783
3784 /*
3785 * Restore interrupts.
3786 */
3787 ASMSetFlags(fEFlags);
3788#endif
3789
3790 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3791
3792 if (RT_SUCCESS(rc))
3793 { /* likely */ }
3794 else
3795 return rc;
3796
3797 /*
3798 * Honor any pending CR3 updates.
3799 *
3800 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3801 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3802 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3803 *
3804 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3805 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3806 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3807 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3808 *
3809 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3810 *
3811 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3812 */
3813 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3814#ifndef IN_NEM_DARWIN
3815 && VMMRZCallRing3IsEnabled(pVCpu)
3816#endif
3817 )
3818 {
3819 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3820 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3821 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3822 }
3823
3824 return VINF_SUCCESS;
3825}
3826
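/*
 * Typical usage sketch (mirrors the call sites later in this file): a caller
 * passes a CPUMCTX_EXTRN_XXX mask of the state it needs and only the bits
 * still marked external are actually read back from the VMCS, e.g.:
 *
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo,
 *                                    CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SREG_MASK);
 *     AssertRCReturn(rc, rc);
 *
 * The mask combination shown is illustrative; real call sites request exactly
 * the state their VM-exit handler touches.
 */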
3827
3828/**
3829 * Check per-VM and per-VCPU force flag actions that require us to go back to
3830 * ring-3 for one reason or another.
3831 *
3832 * @returns Strict VBox status code (i.e. informational status codes too)
3833 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3834 * ring-3.
3835 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3836 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3837 * interrupts)
3838 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3839 * all EMTs to be in ring-3.
3840  * @retval  VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3841 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3842 * to the EM loop.
3843 *
3844 * @param pVCpu The cross context virtual CPU structure.
3845  * @param   fIsNestedGuest  Flag whether this is for a pending nested guest event.
3846 * @param fStepping Whether we are single-stepping the guest using the
3847 * hypervisor debugger.
3848 *
3849 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3850 * is no longer in VMX non-root mode.
3851 */
3852static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3853{
3854#ifndef IN_NEM_DARWIN
3855 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3856#endif
3857
3858 /*
3859 * Update pending interrupts into the APIC's IRR.
3860 */
3861 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3862 APICUpdatePendingInterrupts(pVCpu);
3863
3864 /*
3865 * Anything pending? Should be more likely than not if we're doing a good job.
3866 */
3867 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3868 if ( !fStepping
3869 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3870 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3871 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3872 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3873 return VINF_SUCCESS;
3874
3875     /* Pending PGM CR3 sync. */
3876 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3877 {
3878 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3879 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3880 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3881 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3882 if (rcStrict != VINF_SUCCESS)
3883 {
3884 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3885 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3886 return rcStrict;
3887 }
3888 }
3889
3890 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3891 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3892 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3893 {
3894 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3895 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3896 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3897 return rc;
3898 }
3899
3900 /* Pending VM request packets, such as hardware interrupts. */
3901 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3902 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3903 {
3904 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3905 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3906 return VINF_EM_PENDING_REQUEST;
3907 }
3908
3909 /* Pending PGM pool flushes. */
3910 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3911 {
3912 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3913 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3914 return VINF_PGM_POOL_FLUSH_PENDING;
3915 }
3916
3917 /* Pending DMA requests. */
3918 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3919 {
3920 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3921 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3922 return VINF_EM_RAW_TO_R3;
3923 }
3924
3925#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3926 /*
3927 * Pending nested-guest events.
3928 *
3929     * Please note that the priorities of these events are specified and important.
3930 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3931 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3932 */
3933 if (fIsNestedGuest)
3934 {
3935 /* Pending nested-guest APIC-write. */
3936 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3937 {
3938 Log4Func(("Pending nested-guest APIC-write\n"));
3939 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3940 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3941 return rcStrict;
3942 }
3943
3944 /* Pending nested-guest monitor-trap flag (MTF). */
3945 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3946 {
3947 Log4Func(("Pending nested-guest MTF\n"));
3948 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3949 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3950 return rcStrict;
3951 }
3952
3953 /* Pending nested-guest VMX-preemption timer expired. */
3954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3955 {
3956 Log4Func(("Pending nested-guest preempt timer\n"));
3957 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3958 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3959 return rcStrict;
3960 }
3961 }
3962#else
3963 NOREF(fIsNestedGuest);
3964#endif
3965
3966 return VINF_SUCCESS;
3967}
3968
3969
3970/**
3971 * Converts any TRPM trap into a pending HM event. This is typically used when
3972 * entering from ring-3 (not longjmp returns).
3973 *
3974 * @param pVCpu The cross context virtual CPU structure.
3975 */
3976static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3977{
3978 Assert(TRPMHasTrap(pVCpu));
3979 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3980
3981 uint8_t uVector;
3982 TRPMEVENT enmTrpmEvent;
3983 uint32_t uErrCode;
3984 RTGCUINTPTR GCPtrFaultAddress;
3985 uint8_t cbInstr;
3986 bool fIcebp;
3987
3988 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3989 AssertRC(rc);
3990
3991 uint32_t u32IntInfo;
3992 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3993 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3994
3995 rc = TRPMResetTrap(pVCpu);
3996 AssertRC(rc);
3997 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3998 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3999
4000 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4001}
4002
4003
4004/**
4005 * Converts the pending HM event into a TRPM trap.
4006 *
4007 * @param pVCpu The cross context virtual CPU structure.
4008 */
4009static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4010{
4011 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4012
4013 /* If a trap was already pending, we did something wrong! */
4014 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4015
4016 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4017 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4018 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4019
4020 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4021
4022 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4023 AssertRC(rc);
4024
4025 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4026 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4027
4028 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4029 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4030 else
4031 {
4032 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4033 switch (uVectorType)
4034 {
4035 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4036 TRPMSetTrapDueToIcebp(pVCpu);
4037 RT_FALL_THRU();
4038 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4039 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4040 {
4041 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4042 || ( uVector == X86_XCPT_BP /* INT3 */
4043 || uVector == X86_XCPT_OF /* INTO */
4044 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4045 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4046 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4047 break;
4048 }
4049 }
4050 }
4051
4052 /* We're now done converting the pending event. */
4053 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4054}
4055
4056
4057/**
4058 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4059 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4060 *
4061 * @param pVCpu The cross context virtual CPU structure.
4062 * @param pVmcsInfo The VMCS info. object.
4063 */
4064static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4065{
4066 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4067 {
4068 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4069 {
4070 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4071 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4072 AssertRC(rc);
4073 }
4074    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4075}
4076
4077
4078/**
4079 * Clears the interrupt-window exiting control in the VMCS.
4080 *
4081 * @param pVCpu The cross context virtual CPU structure.
4082 * @param pVmcsInfo The VMCS info. object.
4083 */
4084DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4085{
4086 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4087 {
4088 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4089 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4090 AssertRC(rc);
4091 }
4092}
4093
4094
4095/**
4096 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4097 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4098 *
4099 * @param pVCpu The cross context virtual CPU structure.
4100 * @param pVmcsInfo The VMCS info. object.
4101 */
4102static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4103{
4104 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4105 {
4106 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4107 {
4108 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4109 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4110 AssertRC(rc);
4111 Log4Func(("Setup NMI-window exiting\n"));
4112 }
4113 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4114}
4115
4116
4117/**
4118 * Clears the NMI-window exiting control in the VMCS.
4119 *
4120 * @param pVCpu The cross context virtual CPU structure.
4121 * @param pVmcsInfo The VMCS info. object.
4122 */
4123DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4124{
4125 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4126 {
4127 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4128 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4129 AssertRC(rc);
4130 }
4131}
4132
4133
4134/**
4135 * Injects an event into the guest upon VM-entry by updating the relevant fields
4136 * in the VM-entry area in the VMCS.
4137 *
4138 * @returns Strict VBox status code (i.e. informational status codes too).
4139 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4140 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4141 *
4142 * @param pVCpu The cross context virtual CPU structure.
4143 * @param   pVmcsInfo           The VMCS info. object.
 * @param   fIsNestedGuest      Whether the event injection is for a nested guest.
4144 * @param pEvent The event being injected.
4145 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4146 *                          will be updated if necessary. This cannot be NULL.
4147 * @param fStepping Whether we're single-stepping guest execution and should
4148 * return VINF_EM_DBG_STEPPED if the event is injected
4149 * directly (registers modified by us, not by hardware on
4150 * VM-entry).
4151 */
4152static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
4153 uint32_t *pfIntrState)
4154{
4155 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4156 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4157 Assert(pfIntrState);
4158
4159#ifdef IN_NEM_DARWIN
4160 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4161#endif
4162
4163 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4164 uint32_t u32IntInfo = pEvent->u64IntInfo;
4165 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4166 uint32_t const cbInstr = pEvent->cbInstr;
4167 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4168 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4169 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4170
4171#ifdef VBOX_STRICT
4172 /*
4173 * Validate the error-code-valid bit for hardware exceptions.
4174 * No error codes for exceptions in real-mode.
4175 *
4176 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4177 */
4178 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4179 && !CPUMIsGuestInRealModeEx(pCtx))
4180 {
4181 switch (uVector)
4182 {
4183 case X86_XCPT_PF:
4184 case X86_XCPT_DF:
4185 case X86_XCPT_TS:
4186 case X86_XCPT_NP:
4187 case X86_XCPT_SS:
4188 case X86_XCPT_GP:
4189 case X86_XCPT_AC:
4190 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4191 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4192 RT_FALL_THRU();
4193 default:
4194 break;
4195 }
4196 }
4197
4198 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4199 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4200 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4201#endif
4202
4203 RT_NOREF(uVector);
4204 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4205 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4206 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4207 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4208 {
4209 Assert(uVector <= X86_XCPT_LAST);
4210 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4211 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4212 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4213 }
4214 else
4215 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4216
4217 /*
4218 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4219 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4220 * interrupt handler in the (real-mode) guest.
4221 *
4222 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4223 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4224 */
4225 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4226 {
4227#ifndef IN_NEM_DARWIN
4228 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4229#endif
4230 {
4231 /*
4232 * For CPUs with unrestricted guest execution enabled and with the guest
4233 * in real-mode, we must not set the deliver-error-code bit.
4234 *
4235 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4236 */
4237 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4238 }
4239#ifndef IN_NEM_DARWIN
4240 else
4241 {
4242 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4243 Assert(PDMVmmDevHeapIsEnabled(pVM));
4244 Assert(pVM->hm.s.vmx.pRealModeTSS);
4245 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4246
4247 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4248 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4249 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4250 AssertRCReturn(rc2, rc2);
4251
4252 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4253 size_t const cbIdtEntry = sizeof(X86IDTR16);
4254 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4255 {
4256 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4257 if (uVector == X86_XCPT_DF)
4258 return VINF_EM_RESET;
4259
4260 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4261 No error codes for exceptions in real-mode. */
4262 if (uVector == X86_XCPT_GP)
4263 {
4264 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4265 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4266 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4267 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4268 HMEVENT EventXcptDf;
4269 RT_ZERO(EventXcptDf);
4270 EventXcptDf.u64IntInfo = uXcptDfInfo;
4271 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4272 }
4273
4274 /*
4275 * If we're injecting an event with no valid IDT entry, inject a #GP.
4276 * No error codes for exceptions in real-mode.
4277 *
4278 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4279 */
4280 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4281 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4282 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4283 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4284 HMEVENT EventXcptGp;
4285 RT_ZERO(EventXcptGp);
4286 EventXcptGp.u64IntInfo = uXcptGpInfo;
4287 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4288 }
4289
4290 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4291 uint16_t uGuestIp = pCtx->ip;
4292 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4293 {
4294 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4295 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4296 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4297 }
4298 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4299 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4300
4301 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4302 X86IDTR16 IdtEntry;
4303 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4304 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4305 AssertRCReturn(rc2, rc2);
4306
4307 /* Construct the stack frame for the interrupt/exception handler. */
4308 VBOXSTRICTRC rcStrict;
4309 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4310 if (rcStrict == VINF_SUCCESS)
4311 {
4312 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4313 if (rcStrict == VINF_SUCCESS)
4314 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4315 }
4316
4317 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4318 if (rcStrict == VINF_SUCCESS)
4319 {
4320 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4321 pCtx->rip = IdtEntry.offSel;
4322 pCtx->cs.Sel = IdtEntry.uSel;
4323 pCtx->cs.ValidSel = IdtEntry.uSel;
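                /* Real-mode CS base is the selector shifted left by 4 (selector * 16); since
                   cbIdtEntry is 4 here (a real-mode IDT entry is 4 bytes), shifting by it below
                   amounts to the same thing. */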
4324 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4325 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4326 && uVector == X86_XCPT_PF)
4327 pCtx->cr2 = GCPtrFault;
4328
4329 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4330 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4331 | HM_CHANGED_GUEST_RSP);
4332
4333 /*
4334 * If we delivered a hardware exception (other than an NMI) and if there was
4335 * block-by-STI in effect, we should clear it.
4336 */
4337 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4338 {
4339 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4340 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4341 Log4Func(("Clearing inhibition due to STI\n"));
4342 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4343 }
4344
4345 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4346 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4347
4348 /*
4349 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4350 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4351 */
4352 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4353
4354 /*
4355 * If we eventually support nested-guest execution without unrestricted guest execution,
4356 * we should set fInterceptEvents here.
4357 */
4358 Assert(!fIsNestedGuest);
4359
4360 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4361 if (fStepping)
4362 rcStrict = VINF_EM_DBG_STEPPED;
4363 }
4364 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4365 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4366 return rcStrict;
4367 }
4368#else
4369 RT_NOREF(pVmcsInfo);
4370#endif
4371 }
4372
4373 /*
4374 * Validate.
4375 */
4376 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4377 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4378
4379 /*
4380 * Inject the event into the VMCS.
4381 */
4382 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4383 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4384 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4385 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4386 AssertRC(rc);
4387
4388 /*
4389 * Update guest CR2 if this is a page-fault.
4390 */
4391 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4392 pCtx->cr2 = GCPtrFault;
4393
4394 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4395 return VINF_SUCCESS;
4396}
4397
4398
4399/**
4400 * Evaluates the event to be delivered to the guest and sets it as the pending
4401 * event.
4402 *
4403 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4404 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4405 * NOT restore these force-flags.
4406 *
4407 * @returns Strict VBox status code (i.e. informational status codes too).
4408 * @param pVCpu The cross context virtual CPU structure.
4409 * @param pVmcsInfo The VMCS information structure.
4410 * @param   fIsNestedGuest  Flag whether the evaluation happens for a nested guest.
4411 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4412 */
4413static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4414{
4415 Assert(pfIntrState);
4416 Assert(!TRPMHasTrap(pVCpu));
4417
4418 /*
4419 * Compute/update guest-interruptibility state related FFs.
4420 * The FFs will be used below while evaluating events to be injected.
4421 */
4422 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4423
4424 /*
4425 * Evaluate if a new event needs to be injected.
4426 * An event that's already pending has already performed all necessary checks.
4427 */
4428 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4429 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4430 {
4431 /** @todo SMI. SMIs take priority over NMIs. */
4432
4433 /*
4434 * NMIs.
4435 * NMIs take priority over external interrupts.
4436 */
4437#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4438 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4439#endif
4440 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4441 {
4442 /*
4443 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4444 *
4445 * For a nested-guest, the FF always indicates the outer guest's ability to
4446 * receive an NMI while the guest-interruptibility state bit depends on whether
4447 * the nested-hypervisor is using virtual-NMIs.
4448 */
4449 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4450 {
4451#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4452 if ( fIsNestedGuest
4453 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4454 return IEMExecVmxVmexitXcptNmi(pVCpu);
4455#endif
4456 vmxHCSetPendingXcptNmi(pVCpu);
4457 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4458 Log4Func(("NMI pending injection\n"));
4459
4460 /* We've injected the NMI, bail. */
4461 return VINF_SUCCESS;
4462 }
4463 else if (!fIsNestedGuest)
4464 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4465 }
4466
4467 /*
4468 * External interrupts (PIC/APIC).
4469 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4470 * We cannot re-request the interrupt from the controller again.
4471 */
4472 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4473 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4474 {
4475 Assert(!DBGFIsStepping(pVCpu));
4476 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4477 AssertRC(rc);
4478
4479 /*
4480 * We must not check EFLAGS directly when executing a nested-guest, use
4481 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4482 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4483 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4484 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4485 *
4486 * See Intel spec. 25.4.1 "Event Blocking".
4487 */
4488 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4489 {
4490#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4491 if ( fIsNestedGuest
4492 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4493 {
4494 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4495 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4496 return rcStrict;
4497 }
4498#endif
4499 uint8_t u8Interrupt;
4500 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4501 if (RT_SUCCESS(rc))
4502 {
4503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4504 if ( fIsNestedGuest
4505 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4506 {
4507 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4508 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4509 return rcStrict;
4510 }
4511#endif
4512 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4513 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4514 }
4515 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4516 {
4517 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4518
4519 if ( !fIsNestedGuest
4520 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4521 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4522 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4523
4524 /*
4525 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4526 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4527 * need to re-set this force-flag here.
4528 */
4529 }
4530 else
4531 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4532
4533 /* We've injected the interrupt or taken necessary action, bail. */
4534 return VINF_SUCCESS;
4535 }
4536 if (!fIsNestedGuest)
4537 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4538 }
4539 }
4540 else if (!fIsNestedGuest)
4541 {
4542 /*
4543 * An event is being injected or we are in an interrupt shadow. Check if another event is
4544 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4545 * the pending event.
4546 */
4547 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4548 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4549 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4550 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4551 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4552 }
4553 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4554
4555 return VINF_SUCCESS;
4556}
4557
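/*
 * Usage sketch (illustrative only; the real call site lives in the run loop outside
 * this section and may differ in detail): the evaluation and injection helpers are
 * meant to be used as a pair, with the guest-interruptibility state flowing from
 * one to the other:
 *
 *     uint32_t fIntrState = 0;
 *     VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, fIntrState, fStepping);
 *
 * Evaluation order above: SMIs (still a todo), then NMIs, then external (PIC/APIC)
 * interrupts.
 */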
4558
4559/**
4560 * Injects any pending events into the guest if the guest is in a state to
4561 * receive them.
4562 *
4563 * @returns Strict VBox status code (i.e. informational status codes too).
4564 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info. object.
4565 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4566 * @param fIntrState The VT-x guest-interruptibility state.
4567 * @param fStepping Whether we are single-stepping the guest using the
4568 * hypervisor debugger and should return
4569 * VINF_EM_DBG_STEPPED if the event was dispatched
4570 * directly.
4571 */
4572static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
4573{
4574 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4575#ifndef IN_NEM_DARWIN
4576 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4577#endif
4578
4579#ifdef VBOX_STRICT
4580 /*
4581 * Verify guest-interruptibility state.
4582 *
4583 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4584 * since injecting an event may modify the interruptibility state and we must thus always
4585 * use fIntrState.
4586 */
4587 {
4588 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4589 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4590 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4591 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4592        Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
4593 Assert(!TRPMHasTrap(pVCpu));
4594 NOREF(fBlockMovSS); NOREF(fBlockSti);
4595 }
4596#endif
4597
4598 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4599 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4600 {
4601 /*
4602 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4603 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4604 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4605 *
4606 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4607 */
4608 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4609#ifdef VBOX_STRICT
4610 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4611 {
4612 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4613 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4614 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4615 }
4616 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4617 {
4618 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4619 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4620 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4621 }
4622#endif
4623 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4624 uIntType));
4625
4626 /*
4627 * Inject the event and get any changes to the guest-interruptibility state.
4628 *
4629 * The guest-interruptibility state may need to be updated if we inject the event
4630 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4631 */
4632 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4633 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4634
4635 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4636 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4637 else
4638 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4639 }
4640
4641 /*
4642 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4643     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4644 */
4645 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4646 && !fIsNestedGuest)
4647 {
4648 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4649
4650 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4651 {
4652 /*
4653 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4654 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4655 */
4656 Assert(!DBGFIsStepping(pVCpu));
4657 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4658 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4659 AssertRC(rc);
4660 }
4661 else
4662 {
4663 /*
4664 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4665 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4666 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4667 * we use MTF, so just make sure it's called before executing guest-code.
4668 */
4669 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4670 }
4671 }
4672    /* else: for nested-guests, this is currently handled while merging controls. */
4673
4674 /*
4675 * Finally, update the guest-interruptibility state.
4676 *
4677 * This is required for the real-on-v86 software interrupt injection, for
4678 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4679 */
4680 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4681 AssertRC(rc);
4682
4683 /*
4684 * There's no need to clear the VM-entry interruption-information field here if we're not
4685 * injecting anything. VT-x clears the valid bit on every VM-exit.
4686 *
4687 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4688 */
4689
4690 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4691 return rcStrict;
4692}
4693
4694
4695/**
4696 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4697 * and update error record fields accordingly.
4698 *
4699 * @returns VMX_IGS_* error codes.
4700 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4701 * wrong with the guest state.
4702 *
4703 * @param pVCpu The cross context virtual CPU structure.
4704 * @param pVmcsInfo The VMCS info. object.
4705 *
4706 * @remarks This function assumes our cache of the VMCS controls
4707 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4708 */
4709static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4710{
4711#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4712#define HMVMX_CHECK_BREAK(expr, err) do { \
4713 if (!(expr)) { uError = (err); break; } \
4714 } while (0)
4715
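    /*
     * The checks below run inside the do { ... } while (0) loop: the first failing check
     * records its VMX_IGS_* code in uError and breaks out, for example:
     *     HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
     */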
4716 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4717 uint32_t uError = VMX_IGS_ERROR;
4718 uint32_t u32IntrState = 0;
4719#ifndef IN_NEM_DARWIN
4720 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4721 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4722#else
4723 bool const fUnrestrictedGuest = true;
4724#endif
4725 do
4726 {
4727 int rc;
4728
4729 /*
4730 * Guest-interruptibility state.
4731 *
4732         * Read this first so that even if a check that does not need the guest-interruptibility
4733         * state fails before those that do, the value recorded below still reflects the correct
4734         * VMCS value and avoids causing further confusion.
4735 */
4736 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4737 AssertRC(rc);
4738
4739 uint32_t u32Val;
4740 uint64_t u64Val;
4741
4742 /*
4743 * CR0.
4744 */
4745 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4746 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4747 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4748 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4749 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4750 if (fUnrestrictedGuest)
4751 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4752
4753 uint64_t u64GuestCr0;
4754 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4755 AssertRC(rc);
4756 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4757 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4758 if ( !fUnrestrictedGuest
4759 && (u64GuestCr0 & X86_CR0_PG)
4760 && !(u64GuestCr0 & X86_CR0_PE))
4761 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4762
4763 /*
4764 * CR4.
4765 */
4766 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4767 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4768 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4769
4770 uint64_t u64GuestCr4;
4771 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4772 AssertRC(rc);
4773 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4774 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4775
4776 /*
4777 * IA32_DEBUGCTL MSR.
4778 */
4779 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4780 AssertRC(rc);
4781 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4782 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4783 {
4784 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4785 }
4786 uint64_t u64DebugCtlMsr = u64Val;
4787
4788#ifdef VBOX_STRICT
4789 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4790 AssertRC(rc);
4791 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4792#endif
4793 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4794
4795 /*
4796 * RIP and RFLAGS.
4797 */
4798 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4799 AssertRC(rc);
4800 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4801 if ( !fLongModeGuest
4802 || !pCtx->cs.Attr.n.u1Long)
4803 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4804 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4805 * must be identical if the "IA-32e mode guest" VM-entry
4806 * control is 1 and CS.L is 1. No check applies if the
4807 * CPU supports 64 linear-address bits. */
4808
4809 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4810 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4811 AssertRC(rc);
4812 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4813 VMX_IGS_RFLAGS_RESERVED);
4814 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4815 uint32_t const u32Eflags = u64Val;
4816
4817 if ( fLongModeGuest
4818 || ( fUnrestrictedGuest
4819 && !(u64GuestCr0 & X86_CR0_PE)))
4820 {
4821 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4822 }
4823
4824 uint32_t u32EntryInfo;
4825 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4826 AssertRC(rc);
4827 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4828 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4829
4830 /*
4831 * 64-bit checks.
4832 */
4833 if (fLongModeGuest)
4834 {
4835 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4836 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4837 }
4838
4839 if ( !fLongModeGuest
4840 && (u64GuestCr4 & X86_CR4_PCIDE))
4841 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4842
4843 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4844 * 51:32 beyond the processor's physical-address width are 0. */
4845
4846 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4847 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4848 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4849
4850#ifndef IN_NEM_DARWIN
4851 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4852 AssertRC(rc);
4853 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4854
4855 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4856 AssertRC(rc);
4857 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4858#endif
4859
4860 /*
4861 * PERF_GLOBAL MSR.
4862 */
4863 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4864 {
4865 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4866 AssertRC(rc);
4867 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4868 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4869 }
4870
4871 /*
4872 * PAT MSR.
4873 */
4874 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4875 {
4876 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4877 AssertRC(rc);
4878 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4879 for (unsigned i = 0; i < 8; i++)
4880 {
4881 uint8_t u8Val = (u64Val & 0xff);
4882 if ( u8Val != 0 /* UC */
4883 && u8Val != 1 /* WC */
4884 && u8Val != 4 /* WT */
4885 && u8Val != 5 /* WP */
4886 && u8Val != 6 /* WB */
4887 && u8Val != 7 /* UC- */)
4888 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4889 u64Val >>= 8;
4890 }
4891 }
4892
4893 /*
4894 * EFER MSR.
4895 */
4896 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4897 {
4898 Assert(g_fHmVmxSupportsVmcsEfer);
4899 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4900 AssertRC(rc);
4901 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4902 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4903 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4904 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4905 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4906 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4907 * iemVmxVmentryCheckGuestState(). */
4908 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4909 || !(u64GuestCr0 & X86_CR0_PG)
4910 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4911 VMX_IGS_EFER_LMA_LME_MISMATCH);
4912 }
4913
4914 /*
4915 * Segment registers.
4916 */
4917 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4918 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4919 if (!(u32Eflags & X86_EFL_VM))
4920 {
4921 /* CS */
4922 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4923 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4924 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4925 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4926 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4927 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4928 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4929 /* CS cannot be loaded with NULL in protected mode. */
4930 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4931 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4932 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4933 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4934 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4935 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4936 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4937 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4938 else
4939 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4940
4941 /* SS */
4942 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4943 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4944 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4945 if ( !(pCtx->cr0 & X86_CR0_PE)
4946 || pCtx->cs.Attr.n.u4Type == 3)
4947 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4948
4949 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4950 {
4951 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4952 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4953 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4954 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4955 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4956 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4957 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4958 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4959 }
4960
4961 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4962 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4963 {
4964 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4965 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4966 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4967 || pCtx->ds.Attr.n.u4Type > 11
4968 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4969 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4970 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4971 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4972 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4973 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4974 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4975 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4976 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4977 }
4978 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4979 {
4980 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4981 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4982 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4983 || pCtx->es.Attr.n.u4Type > 11
4984 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4985 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4986 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4987 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4988 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4989 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4990 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4991 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4992 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4993 }
4994 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4995 {
4996 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
4997 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
4998 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4999 || pCtx->fs.Attr.n.u4Type > 11
5000 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5001 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5002 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5003 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5004 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5005 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5006 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5007 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5008 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5009 }
5010 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5011 {
5012 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5013 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5014 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5015 || pCtx->gs.Attr.n.u4Type > 11
5016 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5017 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5018 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5019 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5020 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5021 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5022 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5023 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5024 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5025 }
5026 /* 64-bit capable CPUs. */
5027 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5028 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5029 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5030 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5031 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5032 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5033 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5034 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5035 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5036 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5037 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5038 }
5039 else
5040 {
5041 /* V86 mode checks. */
5042 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5043 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5044 {
5045 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5046 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5047 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5048 }
5049 else
5050 {
5051 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5052 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5053 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5054 }
5055
5056 /* CS */
5057 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5058 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5059 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5060 /* SS */
5061 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5062 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5063 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5064 /* DS */
5065 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5066 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5067 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5068 /* ES */
5069 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5070 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5071 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5072 /* FS */
5073 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5074 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5075 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5076 /* GS */
5077 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5078 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5079 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5080 /* 64-bit capable CPUs. */
5081 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5082 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5083 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5084 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5085 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5086 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5087 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5088 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5089 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5090 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5091 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5092 }
5093
5094 /*
5095 * TR.
5096 */
5097 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5098 /* 64-bit capable CPUs. */
5099 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5100 if (fLongModeGuest)
5101 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5102 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5103 else
5104 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5105 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5106 VMX_IGS_TR_ATTR_TYPE_INVALID);
5107 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5108 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5109 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5110 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5111 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5112 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5113 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5114 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5115
5116 /*
5117 * GDTR and IDTR (64-bit capable checks).
5118 */
5119 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5120 AssertRC(rc);
5121 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5122
5123 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5124 AssertRC(rc);
5125 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5126
5127 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5128 AssertRC(rc);
5129 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5130
5131 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5132 AssertRC(rc);
5133 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5134
5135 /*
5136 * Guest Non-Register State.
5137 */
5138 /* Activity State. */
5139 uint32_t u32ActivityState;
5140 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5141 AssertRC(rc);
5142 HMVMX_CHECK_BREAK( !u32ActivityState
5143 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5144 VMX_IGS_ACTIVITY_STATE_INVALID);
5145 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5146 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5147
5148 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5149 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5150 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5151
5152 /** @todo Activity state and injecting interrupts. Left as a todo since we
5153         *        currently don't use any activity state other than ACTIVE. */
5154
5155 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5156 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5157
5158 /* Guest interruptibility-state. */
5159 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5160 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5161 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5162 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5163 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5164 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5165 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5166 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5167 {
5168 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5169 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5170 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5171 }
5172 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5173 {
5174 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5175 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5176 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5177 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5178 }
5179 /** @todo Assumes the processor is not in SMM. */
5180 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5181 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5182 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5183 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5184 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5185 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5186 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5187 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5188
5189 /* Pending debug exceptions. */
5190 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5191 AssertRC(rc);
5192 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5193 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5194        u32Val = u64Val;    /* For the pending debug exception checks below. */
5195
5196 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5197 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5198 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5199 {
5200 if ( (u32Eflags & X86_EFL_TF)
5201 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5202 {
5203 /* Bit 14 is PendingDebug.BS. */
5204 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5205 }
5206 if ( !(u32Eflags & X86_EFL_TF)
5207 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5208 {
5209 /* Bit 14 is PendingDebug.BS. */
5210 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5211 }
5212 }
5213
5214#ifndef IN_NEM_DARWIN
5215 /* VMCS link pointer. */
5216 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5217 AssertRC(rc);
5218 if (u64Val != UINT64_C(0xffffffffffffffff))
5219 {
5220 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5221 /** @todo Bits beyond the processor's physical-address width MBZ. */
5222 /** @todo SMM checks. */
5223 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5224 Assert(pVmcsInfo->pvShadowVmcs);
5225 VMXVMCSREVID VmcsRevId;
5226 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5227 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5228 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5229 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5230 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5231 }
5232
5233 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5234 * not using nested paging? */
5235 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5236 && !fLongModeGuest
5237 && CPUMIsGuestInPAEModeEx(pCtx))
5238 {
5239 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5240 AssertRC(rc);
5241 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5242
5243 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5244 AssertRC(rc);
5245 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5246
5247 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5248 AssertRC(rc);
5249 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5250
5251 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5252 AssertRC(rc);
5253 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5254 }
5255#endif
5256
5257 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5258 if (uError == VMX_IGS_ERROR)
5259 uError = VMX_IGS_REASON_NOT_FOUND;
5260 } while (0);
5261
5262 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5263 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5264 return uError;
5265
5266#undef HMVMX_ERROR_BREAK
5267#undef HMVMX_CHECK_BREAK
5268}
5269/** @} */
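/*
 * Sketch of how the return value is meant to be consumed (illustrative; the real
 * caller is outside this section):
 *
 *     uint32_t const uIgs = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
 *     if (uIgs != VMX_IGS_REASON_NOT_FOUND)
 *         Log4(("Invalid guest state diagnosed: %u\n", uIgs));
 *
 * The same value is also recorded in u32HMError above for later diagnostics.
 */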
5270
5271
5272#ifndef HMVMX_USE_FUNCTION_TABLE
5273/**
5274 * Handles a guest VM-exit from hardware-assisted VMX execution.
5275 *
5276 * @returns Strict VBox status code (i.e. informational status codes too).
5277 * @param pVCpu The cross context virtual CPU structure.
5278 * @param pVmxTransient The VMX-transient structure.
5279 */
5280DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5281{
5282#ifdef DEBUG_ramshankar
5283# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5284 do { \
5285 if (a_fSave != 0) \
5286 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5287 VBOXSTRICTRC rcStrict = a_CallExpr; \
5288 if (a_fSave != 0) \
5289 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5290 return rcStrict; \
5291 } while (0)
5292#else
5293# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5294#endif
5295 uint32_t const uExitReason = pVmxTransient->uExitReason;
5296 switch (uExitReason)
5297 {
5298 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5299 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5300 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5301 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5302 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5303 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5304 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5305 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5306 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5307 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5308 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5309 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5310 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5311 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5312 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5313 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5314 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5315 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5316 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5317 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5318 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5319 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5320 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5321 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5322 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5323 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5324 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5325 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5326 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5327 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5328#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5329 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5330 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5331 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5332 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5333 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5334        case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5335        case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5336 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5337 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5338 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5339 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient));
5340#else
5341 case VMX_EXIT_VMCLEAR:
5342 case VMX_EXIT_VMLAUNCH:
5343 case VMX_EXIT_VMPTRLD:
5344 case VMX_EXIT_VMPTRST:
5345 case VMX_EXIT_VMREAD:
5346 case VMX_EXIT_VMRESUME:
5347 case VMX_EXIT_VMWRITE:
5348 case VMX_EXIT_VMXOFF:
5349 case VMX_EXIT_VMXON:
5350 case VMX_EXIT_INVVPID:
5351 case VMX_EXIT_INVEPT:
5352 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5353#endif
5354
5355 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5356 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5357 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5358
5359 case VMX_EXIT_INIT_SIGNAL:
5360 case VMX_EXIT_SIPI:
5361 case VMX_EXIT_IO_SMI:
5362 case VMX_EXIT_SMI:
5363 case VMX_EXIT_ERR_MSR_LOAD:
5364 case VMX_EXIT_ERR_MACHINE_CHECK:
5365 case VMX_EXIT_PML_FULL:
5366 case VMX_EXIT_VIRTUALIZED_EOI:
5367 case VMX_EXIT_GDTR_IDTR_ACCESS:
5368 case VMX_EXIT_LDTR_TR_ACCESS:
5369 case VMX_EXIT_APIC_WRITE:
5370 case VMX_EXIT_RDRAND:
5371 case VMX_EXIT_RSM:
5372 case VMX_EXIT_VMFUNC:
5373 case VMX_EXIT_ENCLS:
5374 case VMX_EXIT_RDSEED:
5375 case VMX_EXIT_XSAVES:
5376 case VMX_EXIT_XRSTORS:
5377 case VMX_EXIT_UMWAIT:
5378 case VMX_EXIT_TPAUSE:
5379 case VMX_EXIT_LOADIWKEY:
5380 default:
5381 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5382 }
5383#undef VMEXIT_CALL_RET
5384}
5385#endif /* !HMVMX_USE_FUNCTION_TABLE */
5386
5387
5388#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5389/**
5390 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5391 *
5392 * @returns Strict VBox status code (i.e. informational status codes too).
5393 * @param pVCpu The cross context virtual CPU structure.
5394 * @param pVmxTransient The VMX-transient structure.
5395 */
5396DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5397{
5398 uint32_t const uExitReason = pVmxTransient->uExitReason;
5399 switch (uExitReason)
5400 {
5401 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5402 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5403 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5404 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5405 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5406
5407 /*
5408 * We shouldn't direct host physical interrupts to the nested-guest.
5409 */
5410 case VMX_EXIT_EXT_INT:
5411 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5412
5413 /*
5414         * Instructions that cause VM-exits unconditionally or where the condition is
5415         * always taken solely from the nested hypervisor (meaning if the VM-exit
5416 * happens, it's guaranteed to be a nested-guest VM-exit).
5417 *
5418 * - Provides VM-exit instruction length ONLY.
5419 */
5420 case VMX_EXIT_CPUID: /* Unconditional. */
5421 case VMX_EXIT_VMCALL:
5422 case VMX_EXIT_GETSEC:
5423 case VMX_EXIT_INVD:
5424 case VMX_EXIT_XSETBV:
5425 case VMX_EXIT_VMLAUNCH:
5426 case VMX_EXIT_VMRESUME:
5427 case VMX_EXIT_VMXOFF:
5428 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5429 case VMX_EXIT_VMFUNC:
5430 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5431
5432 /*
5433         * Instructions that cause VM-exits unconditionally or where the condition is
5434         * always taken solely from the nested hypervisor (meaning if the VM-exit
5435 * happens, it's guaranteed to be a nested-guest VM-exit).
5436 *
5437 * - Provides VM-exit instruction length.
5438 * - Provides VM-exit information.
5439 * - Optionally provides Exit qualification.
5440 *
5441 * Since Exit qualification is 0 for all VM-exits where it is not
5442 * applicable, reading and passing it to the guest should produce
5443 * defined behavior.
5444 *
5445 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5446 */
5447 case VMX_EXIT_INVEPT: /* Unconditional. */
5448 case VMX_EXIT_INVVPID:
5449 case VMX_EXIT_VMCLEAR:
5450 case VMX_EXIT_VMPTRLD:
5451 case VMX_EXIT_VMPTRST:
5452 case VMX_EXIT_VMXON:
5453 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5454 case VMX_EXIT_LDTR_TR_ACCESS:
5455 case VMX_EXIT_RDRAND:
5456 case VMX_EXIT_RDSEED:
5457 case VMX_EXIT_XSAVES:
5458 case VMX_EXIT_XRSTORS:
5459 case VMX_EXIT_UMWAIT:
5460 case VMX_EXIT_TPAUSE:
5461 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5462
5463 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5464 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5465 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5466 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5467 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5468 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5469 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5470 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5471 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5472 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5473 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5482
5483 case VMX_EXIT_PREEMPT_TIMER:
5484 {
5485 /** @todo NSTVMX: Preempt timer. */
5486 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5487 }
5488
5489 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5491
5492 case VMX_EXIT_VMREAD:
5493 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5494
5495 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5496 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5497
5498 case VMX_EXIT_INIT_SIGNAL:
5499 case VMX_EXIT_SIPI:
5500 case VMX_EXIT_IO_SMI:
5501 case VMX_EXIT_SMI:
5502 case VMX_EXIT_ERR_MSR_LOAD:
5503 case VMX_EXIT_ERR_MACHINE_CHECK:
5504 case VMX_EXIT_PML_FULL:
5505 case VMX_EXIT_RSM:
5506 default:
5507 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5508 }
5509}
5510#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5511
5512
5513/** @name VM-exit helpers.
5514 * @{
5515 */
5516/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5517/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5518/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5519
5520/** Macro for VM-exits called unexpectedly. */
5521#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5522 do { \
5523 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5524 return VERR_VMX_UNEXPECTED_EXIT; \
5525 } while (0)
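/* Typical use inside an exit handler (illustrative):
       HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason); */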
5526
5527#ifdef VBOX_STRICT
5528# ifndef IN_NEM_DARWIN
5529/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5530# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5531 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5532
5533# define HMVMX_ASSERT_PREEMPT_CPUID() \
5534 do { \
5535 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5536 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5537 } while (0)
5538
5539# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5540 do { \
5541 AssertPtr((a_pVCpu)); \
5542 AssertPtr((a_pVmxTransient)); \
5543 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5544 Assert((a_pVmxTransient)->pVmcsInfo); \
5545 Assert(ASMIntAreEnabled()); \
5546 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5547 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5548 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5549 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5550 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5551 HMVMX_ASSERT_PREEMPT_CPUID(); \
5552 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5553 } while (0)
5554# else
5555# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5556# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5557# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5558 do { \
5559 AssertPtr((a_pVCpu)); \
5560 AssertPtr((a_pVmxTransient)); \
5561 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5562 Assert((a_pVmxTransient)->pVmcsInfo); \
5563 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5564 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5565 } while (0)
5566# endif
5567
5568# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5569 do { \
5570 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5571 Assert((a_pVmxTransient)->fIsNestedGuest); \
5572 } while (0)
5573
5574# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5575 do { \
5576 Log4Func(("\n")); \
5577 } while (0)
5578#else
5579# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5580 do { \
5581 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5582 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5583 } while (0)
5584
5585# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5586 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5587
5588# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5589#endif
5590
5591#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5592/** Macro that does the necessary privilege checks and handles intercepted VM-exits for
5593 * guests that attempted to execute a VMX instruction. */
5594# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5595 do \
5596 { \
5597 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5598 if (rcStrictTmp == VINF_SUCCESS) \
5599 { /* likely */ } \
5600 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5601 { \
5602 Assert((a_pVCpu)->hm.s.Event.fPending); \
5603 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5604 return VINF_SUCCESS; \
5605 } \
5606 else \
5607 { \
5608 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5609 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5610 } \
5611 } while (0)
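/* Intended use at the top of a VMX-instruction VM-exit handler (illustrative):
       HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason); */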
5612
5613/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5614# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5615 do \
5616 { \
5617 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5618 (a_pGCPtrEffAddr)); \
5619 if (rcStrictTmp == VINF_SUCCESS) \
5620 { /* likely */ } \
5621 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5622 { \
5623 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5624 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5625 NOREF(uXcptTmp); \
5626 return VINF_SUCCESS; \
5627 } \
5628 else \
5629 { \
5630 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5631 return rcStrictTmp; \
5632 } \
5633 } while (0)
5634#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5635
5636
5637/**
5638 * Advances the guest RIP by the specified number of bytes.
5639 *
5640 * @param pVCpu The cross context virtual CPU structure.
5641 * @param cbInstr Number of bytes to advance the RIP by.
5642 *
5643 * @remarks No-long-jump zone!!!
5644 */
5645DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5646{
5647 /* Advance the RIP. */
5648 pVCpu->cpum.GstCtx.rip += cbInstr;
5649 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5650
5651 /* Update interrupt inhibition. */
5652 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5653 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5654 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5655}
5656
5657
5658/**
5659 * Advances the guest RIP after reading it from the VMCS.
5660 *
5661 * @returns VBox status code, no informational status codes.
5662 * @param pVCpu The cross context virtual CPU structure.
5663 * @param pVmxTransient The VMX-transient structure.
5664 *
5665 * @remarks No-long-jump zone!!!
5666 */
5667static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5668{
5669 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5670 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5671 AssertRCReturn(rc, rc);
5672
5673 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5674 return VINF_SUCCESS;
5675}
5676
5677
5678/**
5679 * Handle a condition that occurred while delivering an event through the guest or
5680 * nested-guest IDT.
5681 *
5682 * @returns Strict VBox status code (i.e. informational status codes too).
5683 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5684 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5685 *          to continue execution of the guest which will deliver the \#DF.
5686 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5687 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5688 *
5689 * @param pVCpu The cross context virtual CPU structure.
5690 * @param pVmxTransient The VMX-transient structure.
5691 *
5692 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5693 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5694 * is due to an EPT violation, PML full or SPP-related event.
5695 *
5696 * @remarks No-long-jump zone!!!
5697 */
5698static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5699{
5700 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5701 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5702 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5703 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5704 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5705 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5706
5707 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5708 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5709 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5710 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5711 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5712 {
5713 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5714 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5715
5716 /*
5717 * If the event was a software interrupt (generated with INT n) or a software exception
5718 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5719 * can handle the VM-exit and continue guest execution which will re-execute the
5720 * instruction rather than re-injecting the exception, as that can cause premature
5721 * trips to ring-3 before injection and involve TRPM which currently has no way of
5722 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5723 * the problem).
5724 */
5725 IEMXCPTRAISE enmRaise;
5726 IEMXCPTRAISEINFO fRaiseInfo;
5727 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5728 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5729 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5730 {
5731 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5732 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5733 }
5734 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5735 {
5736 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5737 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5738 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5739
5740 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5741 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5742
5743 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5744
5745 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5746 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5747 {
5748 pVmxTransient->fVectoringPF = true;
5749 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5750 }
5751 }
5752 else
5753 {
5754 /*
5755 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5756 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5757 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5758 */
5759 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5760 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5761 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5762 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5763 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5764 }
5765
5766 /*
5767 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5768 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5769 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5770 * subsequent VM-entry would fail, see @bugref{7445}.
5771 *
5772 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5773 */
5774 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5775 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5776 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5777 && CPUMIsGuestNmiBlocking(pVCpu))
5778 {
5779 CPUMSetGuestNmiBlocking(pVCpu, false);
5780 }
5781
5782 switch (enmRaise)
5783 {
5784 case IEMXCPTRAISE_CURRENT_XCPT:
5785 {
5786 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5787 Assert(rcStrict == VINF_SUCCESS);
5788 break;
5789 }
5790
5791 case IEMXCPTRAISE_PREV_EVENT:
5792 {
5793 uint32_t u32ErrCode;
5794 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5795 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5796 else
5797 u32ErrCode = 0;
5798
5799 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5800 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5801 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5802 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5803
5804 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5805 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5806 Assert(rcStrict == VINF_SUCCESS);
5807 break;
5808 }
5809
5810 case IEMXCPTRAISE_REEXEC_INSTR:
5811 Assert(rcStrict == VINF_SUCCESS);
5812 break;
5813
5814 case IEMXCPTRAISE_DOUBLE_FAULT:
5815 {
5816 /*
5817                 * Determining a vectoring double #PF condition. Used later, when PGM evaluates the
5818 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5819 */
5820 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5821 {
5822 pVmxTransient->fVectoringDoublePF = true;
5823 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5824 pVCpu->cpum.GstCtx.cr2));
5825 rcStrict = VINF_SUCCESS;
5826 }
5827 else
5828 {
5829 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5830 vmxHCSetPendingXcptDF(pVCpu);
5831 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5832 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5833 rcStrict = VINF_HM_DOUBLE_FAULT;
5834 }
5835 break;
5836 }
5837
5838 case IEMXCPTRAISE_TRIPLE_FAULT:
5839 {
5840 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5841 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5842 rcStrict = VINF_EM_RESET;
5843 break;
5844 }
5845
5846 case IEMXCPTRAISE_CPU_HANG:
5847 {
5848 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5849 rcStrict = VERR_EM_GUEST_CPU_HANG;
5850 break;
5851 }
5852
5853 default:
5854 {
5855 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5856 rcStrict = VERR_VMX_IPE_2;
5857 break;
5858 }
5859 }
5860 }
5861 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5862 && !CPUMIsGuestNmiBlocking(pVCpu))
5863 {
5864 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5865 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5866 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5867 {
5868 /*
5869             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5870 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5871 * that virtual NMIs remain blocked until the IRET execution is completed.
5872 *
5873 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5874 */
5875 CPUMSetGuestNmiBlocking(pVCpu, true);
5876 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5877 }
5878 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5879 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5880 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5881 {
5882 /*
5883 * Execution of IRET caused an EPT violation, page-modification log-full event or
5884 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5885 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5886 * that virtual NMIs remain blocked until the IRET execution is completed.
5887 *
5888 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5889 */
5890 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5891 {
5892 CPUMSetGuestNmiBlocking(pVCpu, true);
5893 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5894 }
5895 }
5896 }
5897
5898 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5899 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5900 return rcStrict;
5901}
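
/*
 * Minimal decision sketch (kept out of the build; simplified, hypothetical
 * enums) of the event-delivery classification performed above: software
 * generated events are simply re-executed, exceptions raised during delivery
 * are evaluated for #DF/triple-fault promotion, and everything else is
 * re-injected as-is.
 */
#if 0
# include <stdbool.h>

typedef enum { SKETCH_IDT_SW_EVENT, SKETCH_IDT_HW_XCPT, SKETCH_IDT_NMI, SKETCH_IDT_EXT_INT } SKETCHIDTTYPE;
typedef enum { SKETCH_REEXEC_INSTR, SKETCH_REINJECT_PREV, SKETCH_EVAL_RECURSION } SKETCHACTION;

static SKETCHACTION sketchClassifyIdtVectoring(SKETCHIDTTYPE enmIdtType, bool fExitIntInfoValid)
{
    if (enmIdtType == SKETCH_IDT_SW_EVENT)  /* INT n, INT3/INTO, ICEBP: just re-run the instruction. */
        return SKETCH_REEXEC_INSTR;
    if (fExitIntInfoValid)                  /* Exception hit while delivering an event: may become #DF etc. */
        return SKETCH_EVAL_RECURSION;
    return SKETCH_REINJECT_PREV;            /* EPT/APIC-access style exits: replay the original event. */
}
#endif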
5902
5903
5904#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5905/**
5906 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5907 * guest attempting to execute a VMX instruction.
5908 *
5909 * @returns Strict VBox status code (i.e. informational status codes too).
5910 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5911 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5912 *
5913 * @param pVCpu The cross context virtual CPU structure.
5914 * @param uExitReason The VM-exit reason.
5915 *
5916 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5917 * @remarks No-long-jump zone!!!
5918 */
5919static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5920{
5921 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5922 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5923
5924 /*
5925 * The physical CPU would have already checked the CPU mode/code segment.
5926 * We shall just assert here for paranoia.
5927 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5928 */
5929 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5930 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5931 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5932
5933 if (uExitReason == VMX_EXIT_VMXON)
5934 {
5935 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5936
5937 /*
5938 * We check CR4.VMXE because it is required to be always set while in VMX operation
5939 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5940 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5941 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5942 */
5943 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5944 {
5945 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5946 vmxHCSetPendingXcptUD(pVCpu);
5947 return VINF_HM_PENDING_XCPT;
5948 }
5949 }
5950 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5951 {
5952 /*
5953 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5954 * (other than VMXON), we need to raise a #UD.
5955 */
5956 Log4Func(("Not in VMX root mode -> #UD\n"));
5957 vmxHCSetPendingXcptUD(pVCpu);
5958 return VINF_HM_PENDING_XCPT;
5959 }
5960
5961 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5962 return VINF_SUCCESS;
5963}
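
/*
 * Sketch (kept out of the build; simplified) of the #UD gating above: VMXON
 * requires guest CR4.VMXE to be set, every other VMX instruction requires the
 * guest to already be in VMX root operation.
 */
#if 0
# include <stdbool.h>

static bool sketchVmxInstrRaisesUd(bool fIsVmxon, bool fGuestCr4Vmxe, bool fGuestInVmxRootMode)
{
    if (fIsVmxon)
        return !fGuestCr4Vmxe;
    return !fGuestInVmxRootMode;
}
#endif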
5964
5965
5966/**
5967 * Decodes the memory operand of an instruction that caused a VM-exit.
5968 *
5969 * The Exit qualification field provides the displacement field for memory
5970 * operand instructions, if any.
5971 *
5972 * @returns Strict VBox status code (i.e. informational status codes too).
5973 * @retval VINF_SUCCESS if the operand was successfully decoded.
5974 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5975 * operand.
5976 * @param pVCpu The cross context virtual CPU structure.
5977 * @param uExitInstrInfo The VM-exit instruction information field.
5978 * @param enmMemAccess The memory operand's access type (read or write).
5979 * @param GCPtrDisp The instruction displacement field, if any. For
5980 * RIP-relative addressing pass RIP + displacement here.
5981 * @param pGCPtrMem Where to store the effective destination memory address.
5982 *
5983 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5984 * virtual-8086 mode hence skips those checks while verifying if the
5985 * segment is valid.
5986 */
5987static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5988 PRTGCPTR pGCPtrMem)
5989{
5990 Assert(pGCPtrMem);
5991 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5992 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5993 | CPUMCTX_EXTRN_CR0);
5994
5995 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5996 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
5997 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
5998
5999 VMXEXITINSTRINFO ExitInstrInfo;
6000 ExitInstrInfo.u = uExitInstrInfo;
6001 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6002 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6003 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6004 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6005 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6006 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6007 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6008 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6009 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6010
6011 /*
6012 * Validate instruction information.
6013     * This shouldn't happen on real hardware but it is useful while testing our nested hardware-virtualization code.
6014 */
6015 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6016 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6017 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6018 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6019 AssertLogRelMsgReturn(fIsMemOperand,
6020 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6021
6022 /*
6023 * Compute the complete effective address.
6024 *
6025 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6026 * See AMD spec. 4.5.2 "Segment Registers".
6027 */
6028 RTGCPTR GCPtrMem = GCPtrDisp;
6029 if (fBaseRegValid)
6030 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6031 if (fIdxRegValid)
6032 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6033
6034 RTGCPTR const GCPtrOff = GCPtrMem;
6035 if ( !fIsLongMode
6036 || iSegReg >= X86_SREG_FS)
6037 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6038 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6039
6040 /*
6041 * Validate effective address.
6042 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6043 */
6044 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6045 Assert(cbAccess > 0);
6046 if (fIsLongMode)
6047 {
6048 if (X86_IS_CANONICAL(GCPtrMem))
6049 {
6050 *pGCPtrMem = GCPtrMem;
6051 return VINF_SUCCESS;
6052 }
6053
6054 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6055 * "Data Limit Checks in 64-bit Mode". */
6056 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6057 vmxHCSetPendingXcptGP(pVCpu, 0);
6058 return VINF_HM_PENDING_XCPT;
6059 }
6060
6061 /*
6062 * This is a watered down version of iemMemApplySegment().
6063 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6064 * and segment CPL/DPL checks are skipped.
6065 */
6066 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6067 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6068 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6069
6070 /* Check if the segment is present and usable. */
6071 if ( pSel->Attr.n.u1Present
6072 && !pSel->Attr.n.u1Unusable)
6073 {
6074 Assert(pSel->Attr.n.u1DescType);
6075 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6076 {
6077 /* Check permissions for the data segment. */
6078 if ( enmMemAccess == VMXMEMACCESS_WRITE
6079 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6080 {
6081 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6082 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6083 return VINF_HM_PENDING_XCPT;
6084 }
6085
6086 /* Check limits if it's a normal data segment. */
6087 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6088 {
6089 if ( GCPtrFirst32 > pSel->u32Limit
6090 || GCPtrLast32 > pSel->u32Limit)
6091 {
6092 Log4Func(("Data segment limit exceeded. "
6093 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6094 GCPtrLast32, pSel->u32Limit));
6095 if (iSegReg == X86_SREG_SS)
6096 vmxHCSetPendingXcptSS(pVCpu, 0);
6097 else
6098 vmxHCSetPendingXcptGP(pVCpu, 0);
6099 return VINF_HM_PENDING_XCPT;
6100 }
6101 }
6102 else
6103 {
6104 /* Check limits if it's an expand-down data segment.
6105 Note! The upper boundary is defined by the B bit, not the G bit! */
6106 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6107 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6108 {
6109 Log4Func(("Expand-down data segment limit exceeded. "
6110 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6111 GCPtrLast32, pSel->u32Limit));
6112 if (iSegReg == X86_SREG_SS)
6113 vmxHCSetPendingXcptSS(pVCpu, 0);
6114 else
6115 vmxHCSetPendingXcptGP(pVCpu, 0);
6116 return VINF_HM_PENDING_XCPT;
6117 }
6118 }
6119 }
6120 else
6121 {
6122 /* Check permissions for the code segment. */
6123 if ( enmMemAccess == VMXMEMACCESS_WRITE
6124 || ( enmMemAccess == VMXMEMACCESS_READ
6125 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6126 {
6127 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6128 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6129 vmxHCSetPendingXcptGP(pVCpu, 0);
6130 return VINF_HM_PENDING_XCPT;
6131 }
6132
6133 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6134 if ( GCPtrFirst32 > pSel->u32Limit
6135 || GCPtrLast32 > pSel->u32Limit)
6136 {
6137 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6138 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6139 if (iSegReg == X86_SREG_SS)
6140 vmxHCSetPendingXcptSS(pVCpu, 0);
6141 else
6142 vmxHCSetPendingXcptGP(pVCpu, 0);
6143 return VINF_HM_PENDING_XCPT;
6144 }
6145 }
6146 }
6147 else
6148 {
6149 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6150 vmxHCSetPendingXcptGP(pVCpu, 0);
6151 return VINF_HM_PENDING_XCPT;
6152 }
6153
6154 *pGCPtrMem = GCPtrMem;
6155 return VINF_SUCCESS;
6156}
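
/*
 * Self-contained sketch (kept out of the build) of the effective-address math
 * implemented above: displacement + base + (index << scale), plus the segment
 * base where applicable, truncated to the instruction's address size.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

static uint64_t sketchEffAddr(uint64_t uDisp, uint64_t uBase, uint64_t uIndex, unsigned uScale,
                              uint64_t uSegBase, bool fAddSegBase, unsigned cAddrBits /* 16, 32 or 64 */)
{
    uint64_t uEffAddr = uDisp + uBase + (uIndex << uScale);
    if (fAddSegBase) /* Outside long mode, or for FS/GS in long mode. */
        uEffAddr += uSegBase;
    return cAddrBits >= 64 ? uEffAddr : uEffAddr & ((UINT64_C(1) << cAddrBits) - 1);
}
#endif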
6157#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6158
6159
6160/**
6161 * VM-exit helper for LMSW.
6162 */
6163static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6164{
6165 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6166 AssertRCReturn(rc, rc);
6167
6168 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6169 AssertMsg( rcStrict == VINF_SUCCESS
6170 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6171
6172 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6173 if (rcStrict == VINF_IEM_RAISED_XCPT)
6174 {
6175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6176 rcStrict = VINF_SUCCESS;
6177 }
6178
6179 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6180 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6181 return rcStrict;
6182}
6183
6184
6185/**
6186 * VM-exit helper for CLTS.
6187 */
6188static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6189{
6190 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6191 AssertRCReturn(rc, rc);
6192
6193 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6194 AssertMsg( rcStrict == VINF_SUCCESS
6195 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6196
6197 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6198 if (rcStrict == VINF_IEM_RAISED_XCPT)
6199 {
6200 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6201 rcStrict = VINF_SUCCESS;
6202 }
6203
6204 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6205 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6206 return rcStrict;
6207}
6208
6209
6210/**
6211 * VM-exit helper for MOV from CRx (CRx read).
6212 */
6213static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6214{
6215 Assert(iCrReg < 16);
6216 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6217
6218 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6219 AssertRCReturn(rc, rc);
6220
6221 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6222 AssertMsg( rcStrict == VINF_SUCCESS
6223 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6224
6225 if (iGReg == X86_GREG_xSP)
6226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6227 else
6228 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6229#ifdef VBOX_WITH_STATISTICS
6230 switch (iCrReg)
6231 {
6232 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6233 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6234 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6235 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6236 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6237 }
6238#endif
6239 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6240 return rcStrict;
6241}
6242
6243
6244/**
6245 * VM-exit helper for MOV to CRx (CRx write).
6246 */
6247static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6248{
6249 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6250
6251 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6252 AssertMsg( rcStrict == VINF_SUCCESS
6253 || rcStrict == VINF_IEM_RAISED_XCPT
6254 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6255
6256 switch (iCrReg)
6257 {
6258 case 0:
6259 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6260 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6261 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6262 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6263 break;
6264
6265 case 2:
6266 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6267            /* Nothing to do here; CR2 is not part of the VMCS. */
6268 break;
6269
6270 case 3:
6271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6272 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6273 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6274 break;
6275
6276 case 4:
6277 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6278 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6279#ifndef IN_NEM_DARWIN
6280 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6281 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6282#else
6283 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6284#endif
6285 break;
6286
6287 case 8:
6288 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6289 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6290 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6291 break;
6292
6293 default:
6294 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6295 break;
6296 }
6297
6298 if (rcStrict == VINF_IEM_RAISED_XCPT)
6299 {
6300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6301 rcStrict = VINF_SUCCESS;
6302 }
6303 return rcStrict;
6304}
6305
6306
6307/**
6308 * VM-exit exception handler for \#PF (Page-fault exception).
6309 *
6310 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6311 */
6312static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6313{
6314 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6315 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6316
6317#ifndef IN_NEM_DARWIN
6318 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6319 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6320 { /* likely */ }
6321 else
6322#endif
6323 {
6324#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6325 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6326#endif
6327 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6328 if (!pVmxTransient->fVectoringDoublePF)
6329 {
6330 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6331 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6332 }
6333 else
6334 {
6335 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6336 Assert(!pVmxTransient->fIsNestedGuest);
6337 vmxHCSetPendingXcptDF(pVCpu);
6338 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6339 }
6340 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6341 return VINF_SUCCESS;
6342 }
6343
6344 Assert(!pVmxTransient->fIsNestedGuest);
6345
6346    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6347       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6348 if (pVmxTransient->fVectoringPF)
6349 {
6350 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6351 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6352 }
6353
6354 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6355 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6356 AssertRCReturn(rc, rc);
6357
6358 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6359 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6360
6361 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6362 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6363
6364 Log4Func(("#PF: rc=%Rrc\n", rc));
6365 if (rc == VINF_SUCCESS)
6366 {
6367 /*
6368         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6369 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6370 */
6371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6372 TRPMResetTrap(pVCpu);
6373 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6374 return rc;
6375 }
6376
6377 if (rc == VINF_EM_RAW_GUEST_TRAP)
6378 {
6379 if (!pVmxTransient->fVectoringDoublePF)
6380 {
6381 /* It's a guest page fault and needs to be reflected to the guest. */
6382 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6383 TRPMResetTrap(pVCpu);
6384 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6385 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6386 uGstErrorCode, pVmxTransient->uExitQual);
6387 }
6388 else
6389 {
6390 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6391 TRPMResetTrap(pVCpu);
6392 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6393 vmxHCSetPendingXcptDF(pVCpu);
6394 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6395 }
6396
6397 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6398 return VINF_SUCCESS;
6399 }
6400
6401 TRPMResetTrap(pVCpu);
6402 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6403 return rc;
6404}
6405
6406
6407/**
6408 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6409 *
6410 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6411 */
6412static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6413{
6414 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6415 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6416
6417 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6418 AssertRCReturn(rc, rc);
6419
6420 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6421 {
6422 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6423 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6424
6425 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6426         *        provides VM-exit instruction length. If this causes problems later,
6427 * disassemble the instruction like it's done on AMD-V. */
6428 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6429 AssertRCReturn(rc2, rc2);
6430 return rc;
6431 }
6432
6433 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6434 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6435 return VINF_SUCCESS;
6436}
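
/*
 * Sketch (kept out of the build) of the #MF routing decision above: with
 * CR0.NE (bit 5) clear the fault is signalled the legacy way via FERR#
 * (IRQ 13), otherwise it is reflected to the guest as #MF.
 */
#if 0
# include <stdint.h>

typedef enum { SKETCH_MF_RAISE_IRQ13, SKETCH_MF_REFLECT_XCPT } SKETCHMFACTION;

static SKETCHMFACTION sketchRouteMathFault(uint64_t uCr0)
{
    return !(uCr0 & UINT64_C(0x20)) /* CR0.NE */ ? SKETCH_MF_RAISE_IRQ13 : SKETCH_MF_REFLECT_XCPT;
}
#endif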
6437
6438
6439/**
6440 * VM-exit exception handler for \#BP (Breakpoint exception).
6441 *
6442 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6443 */
6444static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6445{
6446 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6447 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6448
6449 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6450 AssertRCReturn(rc, rc);
6451
6452 VBOXSTRICTRC rcStrict;
6453 if (!pVmxTransient->fIsNestedGuest)
6454 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6455 else
6456 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6457
6458 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6459 {
6460 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6461 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6462 rcStrict = VINF_SUCCESS;
6463 }
6464
6465 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6466 return rcStrict;
6467}
6468
6469
6470/**
6471 * VM-exit exception handler for \#AC (Alignment-check exception).
6472 *
6473 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6474 */
6475static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6476{
6477 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6478
6479 /*
6480     * Detect #ACs caused by the host having enabled split-lock detection.
6481 * Emulate such instructions.
6482 */
6483 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6484 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6485 AssertRCReturn(rc, rc);
6486 /** @todo detect split lock in cpu feature? */
6487 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6488 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6489 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6490 || CPUMGetGuestCPL(pVCpu) != 3
6491           /* 3. A legacy #AC requires EFLAGS.AC to be set, so when it is zero this can only be a split-lock case. */
6492 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6493 {
6494 /*
6495 * Check for debug/trace events and import state accordingly.
6496 */
6497 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6498 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6499 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6500#ifndef IN_NEM_DARWIN
6501 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6502#endif
6503 )
6504 {
6505 if (pVM->cCpus == 1)
6506 {
6507#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6508 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6509#else
6510 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6511#endif
6512 AssertRCReturn(rc, rc);
6513 }
6514 }
6515 else
6516 {
6517 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6518 AssertRCReturn(rc, rc);
6519
6520 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6521
6522 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6523 {
6524 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6525 if (rcStrict != VINF_SUCCESS)
6526 return rcStrict;
6527 }
6528 }
6529
6530 /*
6531 * Emulate the instruction.
6532 *
6533 * We have to ignore the LOCK prefix here as we must not retrigger the
6534 * detection on the host. This isn't all that satisfactory, though...
6535 */
6536 if (pVM->cCpus == 1)
6537 {
6538 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6539 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6540
6541 /** @todo For SMP configs we should do a rendezvous here. */
6542 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6543 if (rcStrict == VINF_SUCCESS)
6544#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6545 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6546 HM_CHANGED_GUEST_RIP
6547 | HM_CHANGED_GUEST_RFLAGS
6548 | HM_CHANGED_GUEST_GPRS_MASK
6549 | HM_CHANGED_GUEST_CS
6550 | HM_CHANGED_GUEST_SS);
6551#else
6552 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6553#endif
6554 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6555 {
6556 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6557 rcStrict = VINF_SUCCESS;
6558 }
6559 return rcStrict;
6560 }
6561 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6562 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6563 return VINF_EM_EMULATE_SPLIT_LOCK;
6564 }
6565
6566 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6567 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6568 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6569
6570 /* Re-inject it. We'll detect any nesting before getting here. */
6571 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6572 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6573 return VINF_SUCCESS;
6574}
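
/*
 * Sketch (kept out of the build) of the split-lock vs. legacy #AC test above,
 * using the architectural bit positions (CR0.AM and EFLAGS.AC are both bit 18).
 * A legacy alignment-check #AC needs CR0.AM, CPL 3 and EFLAGS.AC all in place;
 * anything else must be host split-lock detection firing.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

static bool sketchIsSplitLockAc(uint64_t uCr0, uint64_t uEflags, unsigned uCpl)
{
    return !(uCr0 & UINT64_C(0x40000))      /* CR0.AM (bit 18) clear. */
        || uCpl != 3
        || !(uEflags & UINT64_C(0x40000));  /* EFLAGS.AC (bit 18) clear. */
}
#endif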
6575
6576
6577/**
6578 * VM-exit exception handler for \#DB (Debug exception).
6579 *
6580 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6581 */
6582static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6583{
6584 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6585 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6586
6587 /*
6588     * Get the DR6-like value from the Exit qualification and pass it to DBGF for processing.
6589 */
6590 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6591
6592 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6593 uint64_t const uDR6 = X86_DR6_INIT_VAL
6594 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6595 | X86_DR6_BD | X86_DR6_BS));
6596
6597 int rc;
6598 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6599 if (!pVmxTransient->fIsNestedGuest)
6600 {
6601 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6602
6603 /*
6604 * Prevents stepping twice over the same instruction when the guest is stepping using
6605 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6606 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6607 */
6608 if ( rc == VINF_EM_DBG_STEPPED
6609 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6610 {
6611 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6612 rc = VINF_EM_RAW_GUEST_TRAP;
6613 }
6614 }
6615 else
6616 rc = VINF_EM_RAW_GUEST_TRAP;
6617 Log6Func(("rc=%Rrc\n", rc));
6618 if (rc == VINF_EM_RAW_GUEST_TRAP)
6619 {
6620 /*
6621 * The exception was for the guest. Update DR6, DR7.GD and
6622 * IA32_DEBUGCTL.LBR before forwarding it.
6623 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6624 */
6625#ifndef IN_NEM_DARWIN
6626 VMMRZCallRing3Disable(pVCpu);
6627 HM_DISABLE_PREEMPT(pVCpu);
6628
6629 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6630 pCtx->dr[6] |= uDR6;
6631 if (CPUMIsGuestDebugStateActive(pVCpu))
6632 ASMSetDR6(pCtx->dr[6]);
6633
6634 HM_RESTORE_PREEMPT();
6635 VMMRZCallRing3Enable(pVCpu);
6636#else
6637 /** @todo */
6638#endif
6639
6640 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6641 AssertRCReturn(rc, rc);
6642
6643 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6644 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6645
6646 /* Paranoia. */
6647 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6648 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6649
6650 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6651 AssertRC(rc);
6652
6653 /*
6654 * Raise #DB in the guest.
6655 *
6656 * It is important to reflect exactly what the VM-exit gave us (preserving the
6657 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6658 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6659 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6660 *
6661     * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part
6662     * of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6663 */
6664 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6665 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6666 return VINF_SUCCESS;
6667 }
6668
6669 /*
6670 * Not a guest trap, must be a hypervisor related debug event then.
6671     * Not a guest trap; this must be a hypervisor-related debug event then.
6672 */
6673 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6674 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6675 CPUMSetHyperDR6(pVCpu, uDR6);
6676
6677 return rc;
6678}
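
/*
 * Sketch (kept out of the build) of how the DR6 value is assembled from the
 * Exit qualification above, using the architectural DR6 layout: B0..B3 are
 * bits 0-3, BD is bit 13, BS is bit 14, and the init value is 0xFFFF0FF0.
 */
#if 0
# include <stdint.h>

static uint64_t sketchDr6FromExitQual(uint64_t uExitQual)
{
    uint64_t const fCopyMask = UINT64_C(0x600f); /* B0-B3 | BD | BS */
    return UINT64_C(0xffff0ff0) | (uExitQual & fCopyMask);
}
#endif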
6679
6680
6681/**
6682 * Hacks its way around the lovely mesa driver's backdoor accesses.
6683 *
6684 * @sa hmR0SvmHandleMesaDrvGp.
6685 */
6686static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6687{
6688 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6689 RT_NOREF(pCtx);
6690
6691 /* For now we'll just skip the instruction. */
6692 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6693}
6694
6695
6696/**
6697 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6698 * backdoor logging w/o checking what it is running inside.
6699 *
6700 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6701 * backdoor port and magic numbers loaded in registers.
6702 *
6703 * @returns true if it is, false if it isn't.
6704 * @sa hmR0SvmIsMesaDrvGp.
6705 */
6706DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6707{
6708 /* 0xed: IN eAX,dx */
6709 uint8_t abInstr[1];
6710 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6711 return false;
6712
6713 /* Check that it is #GP(0). */
6714 if (pVmxTransient->uExitIntErrorCode != 0)
6715 return false;
6716
6717 /* Check magic and port. */
6718 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6719 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6720 if (pCtx->rax != UINT32_C(0x564d5868))
6721 return false;
6722 if (pCtx->dx != UINT32_C(0x5658))
6723 return false;
6724
6725 /* Flat ring-3 CS. */
6726 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6727 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6728 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6729 if (pCtx->cs.Attr.n.u2Dpl != 3)
6730 return false;
6731 if (pCtx->cs.u64Base != 0)
6732 return false;
6733
6734 /* Check opcode. */
6735 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6736 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6737 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6738 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6739 if (RT_FAILURE(rc))
6740 return false;
6741 if (abInstr[0] != 0xed)
6742 return false;
6743
6744 return true;
6745}
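
/*
 * Sketch (kept out of the build; the struct is a hypothetical stand-in) of the
 * Mesa backdoor fingerprint checked above: a single-byte "IN EAX,DX" (0xED) in
 * flat ring-3 code with the VMware backdoor magic in EAX and port in DX.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

typedef struct SKETCHGPSTATE
{
    uint32_t uErrCode;      /* #GP error code. */
    uint8_t  cbInstr;       /* VM-exit instruction length. */
    uint64_t uRax;          /* Guest RAX. */
    uint16_t uDx;           /* Guest DX. */
    uint8_t  uCsDpl;        /* CS DPL. */
    uint64_t uCsBase;       /* CS base. */
    uint8_t  bOpcode;       /* First opcode byte at RIP. */
} SKETCHGPSTATE;

static bool sketchIsMesaBackdoorGp(SKETCHGPSTATE const *pState)
{
    return pState->uErrCode == 0
        && pState->cbInstr  == 1
        && pState->uRax     == UINT64_C(0x564d5868)  /* 'VMXh' magic. */
        && pState->uDx      == UINT16_C(0x5658)      /* 'VX' backdoor port. */
        && pState->uCsDpl   == 3
        && pState->uCsBase  == 0
        && pState->bOpcode  == 0xed;                 /* IN EAX,DX */
}
#endif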
6746
6747
6748/**
6749 * VM-exit exception handler for \#GP (General-protection exception).
6750 *
6751 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6752 */
6753static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6754{
6755 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6756 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6757
6758 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6759 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6760#ifndef IN_NEM_DARWIN
6761 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6762 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6763 { /* likely */ }
6764 else
6765#endif
6766 {
6767#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6768# ifndef IN_NEM_DARWIN
6769 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6770# else
6771 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6772# endif
6773#endif
6774 /*
6775 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6776 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6777 */
6778 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6779 AssertRCReturn(rc, rc);
6780 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6781 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6782
6783 if ( pVmxTransient->fIsNestedGuest
6784 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6785 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6786 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6787 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6788 else
6789 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6790 return rc;
6791 }
6792
6793#ifndef IN_NEM_DARWIN
6794 Assert(CPUMIsGuestInRealModeEx(pCtx));
6795 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6796 Assert(!pVmxTransient->fIsNestedGuest);
6797
6798 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6799 AssertRCReturn(rc, rc);
6800
6801 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6802 if (rcStrict == VINF_SUCCESS)
6803 {
6804 if (!CPUMIsGuestInRealModeEx(pCtx))
6805 {
6806 /*
6807 * The guest is no longer in real-mode, check if we can continue executing the
6808 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6809 */
6810 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6811 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6812 {
6813 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6814 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6815 }
6816 else
6817 {
6818 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6819 rcStrict = VINF_EM_RESCHEDULE;
6820 }
6821 }
6822 else
6823 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6824 }
6825 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6826 {
6827 rcStrict = VINF_SUCCESS;
6828 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6829 }
6830 return VBOXSTRICTRC_VAL(rcStrict);
6831#endif
6832}
6833
6834
6835/**
6836 * VM-exit exception handler wrapper for all other exceptions that are not handled
6837 * by a specific handler.
6838 *
6839 * This simply re-injects the exception back into the VM without any special
6840 * processing.
6841 *
6842 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6843 */
6844static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6845{
6846 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6847
6848#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6849# ifndef IN_NEM_DARWIN
6850 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6851 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6852 ("uVector=%#x u32XcptBitmap=%#X32\n",
6853 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6854 NOREF(pVmcsInfo);
6855# endif
6856#endif
6857
6858 /*
6859 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6860 * would have been handled while checking exits due to event delivery.
6861 */
6862 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6863
6864#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6865 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6866 AssertRCReturn(rc, rc);
6867 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6868#endif
6869
6870#ifdef VBOX_WITH_STATISTICS
6871 switch (uVector)
6872 {
6873 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6874 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6875 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6876 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6877 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6878 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6879 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6880 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6881 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6882 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6883 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6884 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6885 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6886 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6887 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6888 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6889 default:
6890 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6891 break;
6892 }
6893#endif
6894
6895 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6896 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6897 NOREF(uVector);
6898
6899 /* Re-inject the original exception into the guest. */
6900 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6901 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6902 return VINF_SUCCESS;
6903}
6904
6905
6906/**
6907 * VM-exit exception handler for all exceptions (except NMIs!).
6908 *
6909 * @remarks This may be called for both guests and nested-guests. Take care to not
6910 * make assumptions and avoid doing anything that is not relevant when
6911 * executing a nested-guest (e.g., Mesa driver hacks).
6912 */
6913static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6914{
6915 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6916
6917 /*
6918 * If this VM-exit occurred while delivering an event through the guest IDT, take
6919 * action based on the return code and additional hints (e.g. for page-faults)
6920 * that will be updated in the VMX transient structure.
6921 */
6922 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6923 if (rcStrict == VINF_SUCCESS)
6924 {
6925 /*
6926 * If an exception caused a VM-exit due to delivery of an event, the original
6927 * event may have to be re-injected into the guest. We shall reinject it and
6928 * continue guest execution. However, page-fault is a complicated case and
6929 * needs additional processing done in vmxHCExitXcptPF().
6930 */
6931 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6932 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6933 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6934 || uVector == X86_XCPT_PF)
6935 {
6936 switch (uVector)
6937 {
6938 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6939 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6940 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6941 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6942 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6943 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6944 default:
6945 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6946 }
6947 }
6948 /* else: inject pending event before resuming guest execution. */
6949 }
6950 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6951 {
6952 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6953 rcStrict = VINF_SUCCESS;
6954 }
6955
6956 return rcStrict;
6957}
6958/** @} */
6959
6960
6961/** @name VM-exit handlers.
6962 * @{
6963 */
6964/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6965/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6966/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6967
6968/**
6969 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6970 */
6971HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6972{
6973 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6974 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6975
6976#ifndef IN_NEM_DARWIN
6977 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6978 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6979 return VINF_SUCCESS;
6980 return VINF_EM_RAW_INTERRUPT;
6981#else
6982 return VINF_SUCCESS;
6983#endif
6984}
6985
6986
6987/**
6988 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
6989 * VM-exit.
6990 */
6991HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6992{
6993 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6994 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
6995
6996 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
6997
6998 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
6999 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7000 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7001
7002 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7003 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7004 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7005 NOREF(pVmcsInfo);
7006
7007 VBOXSTRICTRC rcStrict;
7008 switch (uExitIntType)
7009 {
7010#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7011 /*
7012 * Host physical NMIs:
7013 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7014 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7015 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7016 *
7017 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7018 * See Intel spec. 27.5.5 "Updating Non-Register State".
7019 */
7020 case VMX_EXIT_INT_INFO_TYPE_NMI:
7021 {
7022 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7023 break;
7024 }
7025#endif
7026
7027 /*
7028 * Privileged software exceptions (#DB from ICEBP),
7029 * Software exceptions (#BP and #OF),
7030 * Hardware exceptions:
7031 * Process the required exceptions and resume guest execution if possible.
7032 */
7033 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7034 Assert(uVector == X86_XCPT_DB);
7035 RT_FALL_THRU();
7036 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7037 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7038 RT_FALL_THRU();
7039 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7040 {
7041 NOREF(uVector);
7042 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7043 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7044 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7045 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7046
7047 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7048 break;
7049 }
7050
7051 default:
7052 {
7053 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7054 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7055 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7056 break;
7057 }
7058 }
7059
7060 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7061 return rcStrict;
7062}
7063
7064
7065/**
7066 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7067 */
7068HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7069{
7070 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7071
7072    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7073 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7074 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7075
7076 /* Evaluate and deliver pending events and resume guest execution. */
7077 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7078 return VINF_SUCCESS;
7079}
7080
7081
7082/**
7083 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7084 */
7085HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7086{
7087 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7088
7089 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7090 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7091 {
7092 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7093 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7094 }
7095
7096 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7097
7098 /*
7099 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7100 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7101 */
7102 uint32_t fIntrState;
7103 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7104 AssertRC(rc);
7105 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7106 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7107 {
7108 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7109 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7110
7111 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7112 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7113 AssertRC(rc);
7114 }
7115
7116 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7117 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7118
7119 /* Evaluate and deliver pending events and resume guest execution. */
7120 return VINF_SUCCESS;
7121}
7122
7123
7124/**
7125 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7126 */
7127HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7128{
7129 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7130 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7131}
7132
7133
7134/**
7135 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7136 */
7137HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7138{
7139 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7140 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7141}
7142
7143
7144/**
7145 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7146 */
7147HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7148{
7149 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7150
7151 /*
7152 * Get the state we need and update the exit history entry.
7153 */
7154 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7155 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7156
7157 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7158 AssertRCReturn(rc, rc);
7159
7160 VBOXSTRICTRC rcStrict;
7161 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7162 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7163 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7164 if (!pExitRec)
7165 {
7166 /*
7167 * Regular CPUID instruction execution.
7168 */
7169 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7170 if (rcStrict == VINF_SUCCESS)
7171 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7172 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7173 {
7174 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7175 rcStrict = VINF_SUCCESS;
7176 }
7177 }
7178 else
7179 {
7180 /*
7181 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7182 */
7183 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7184 AssertRCReturn(rc2, rc2);
7185
7186 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7187 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7188
7189 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7190 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7191
7192 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7193 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7194 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7195 }
7196 return rcStrict;
7197}
7198
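/*
 * A short note on the exit-history pattern used above (and again for I/O exits further
 * down): the lookup key is the flat PC of the exiting instruction, and a NULL record
 * means the exit is not (yet) frequent enough to be worth probing:
 *
 * @code
 *     // Sketch of the lookup key; identical to what the handler passes in.
 *     uint64_t const uFlatPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
 *     // pExitRec == NULL  -> take the regular, cheap path (IEMExecDecodedCpuid).
 *     // pExitRec != NULL  -> import the full state and let EMHistoryExec() run a
 *     //                      longer stretch of guest code in IEM.
 * @endcode
 */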
7199
7200/**
7201 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7202 */
7203HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7204{
7205 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7206
7207 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7208 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7209 AssertRCReturn(rc, rc);
7210
7211 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7212 return VINF_EM_RAW_EMULATE_INSTR;
7213
7214 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7215 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7216}
7217
7218
7219/**
7220 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7221 */
7222HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7223{
7224 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7225
7226 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7227 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7228 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7229 AssertRCReturn(rc, rc);
7230
7231 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7232 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7233 {
7234 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7235 we must reset offsetting on VM-entry. See @bugref{6634}. */
7236 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7237 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7239 }
7240 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7241 {
7242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7243 rcStrict = VINF_SUCCESS;
7244 }
7245 return rcStrict;
7246}
7247
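/*
 * For reference, a sketch of what TSC offsetting means for the value the guest reads
 * (ignoring TSC scaling, which, when enabled, is applied to the host TSC before the
 * offset is added; u64TscOffset is an illustrative name, not necessarily the field
 * used elsewhere in this template):
 *
 * @code
 *     uint64_t const uHostTsc  = ASMReadTSC();
 *     uint64_t const uGuestTsc = uHostTsc + u64TscOffset;   // what guest RDTSC returns
 * @endcode
 *
 * A spurious exit with offsetting enabled therefore only requires the offset to be
 * recomputed, which is what clearing fUpdatedTscOffsettingAndPreemptTimer above
 * requests for the next VM-entry.
 */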
7248
7249/**
7250 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7251 */
7252HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7253{
7254 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7255
7256 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7257 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7258 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7259 AssertRCReturn(rc, rc);
7260
7261 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7262 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7263 {
7264 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7265 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7266 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7267 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7269 }
7270 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7271 {
7272 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7273 rcStrict = VINF_SUCCESS;
7274 }
7275 return rcStrict;
7276}
7277
7278
7279/**
7280 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7281 */
7282HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7283{
7284 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7285
7286 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7287 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7288 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7289 AssertRCReturn(rc, rc);
7290
7291 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7292 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7293 if (RT_LIKELY(rc == VINF_SUCCESS))
7294 {
7295 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7296 Assert(pVmxTransient->cbExitInstr == 2);
7297 }
7298 else
7299 {
7300 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7301 rc = VERR_EM_INTERPRETER;
7302 }
7303 return rc;
7304}
7305
7306
7307/**
7308 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7309 */
7310HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7311{
7312 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7313
7314 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7315 if (EMAreHypercallInstructionsEnabled(pVCpu))
7316 {
7317 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7318 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7319 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7320 AssertRCReturn(rc, rc);
7321
7322 /* Perform the hypercall. */
7323 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7324 if (rcStrict == VINF_SUCCESS)
7325 {
7326 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7327 AssertRCReturn(rc, rc);
7328 }
7329 else
7330 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7331 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7332 || RT_FAILURE(rcStrict));
7333
7334 /* If the hypercall changes anything other than guest's general-purpose registers,
7335 we would need to reload the guest changed bits here before VM-entry. */
7336 }
7337 else
7338 Log4Func(("Hypercalls not enabled\n"));
7339
7340 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7341 if (RT_FAILURE(rcStrict))
7342 {
7343 vmxHCSetPendingXcptUD(pVCpu);
7344 rcStrict = VINF_SUCCESS;
7345 }
7346
7347 return rcStrict;
7348}
7349
7350
7351/**
7352 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7353 */
7354HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7355{
7356 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7357#ifndef IN_NEM_DARWIN
7358 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7359#endif
7360
7361 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7362 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7363 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7364 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7365 AssertRCReturn(rc, rc);
7366
7367 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7368
7369 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7370 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7371 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7372 {
7373 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7374 rcStrict = VINF_SUCCESS;
7375 }
7376 else
7377 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7378 VBOXSTRICTRC_VAL(rcStrict)));
7379 return rcStrict;
7380}
7381
7382
7383/**
7384 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7385 */
7386HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7387{
7388 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7389
7390 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7391 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7392 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7393 AssertRCReturn(rc, rc);
7394
7395 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7396 if (rcStrict == VINF_SUCCESS)
7397 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7398 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7399 {
7400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7401 rcStrict = VINF_SUCCESS;
7402 }
7403
7404 return rcStrict;
7405}
7406
7407
7408/**
7409 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7410 */
7411HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7412{
7413 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7414
7415 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7416 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7417 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7418 AssertRCReturn(rc, rc);
7419
7420 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7421 if (RT_SUCCESS(rcStrict))
7422 {
7423 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7424 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7425 rcStrict = VINF_SUCCESS;
7426 }
7427
7428 return rcStrict;
7429}
7430
7431
7432/**
7433 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7434 * VM-exit.
7435 */
7436HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7437{
7438 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7439 return VINF_EM_RESET;
7440}
7441
7442
7443/**
7444 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7445 */
7446HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7447{
7448 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7449
7450 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7451 AssertRCReturn(rc, rc);
7452
7453 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7454 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7455 rc = VINF_SUCCESS;
7456 else
7457 rc = VINF_EM_HALT;
7458
7459 if (rc != VINF_SUCCESS)
7460 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7461 return rc;
7462}
7463
7464
7465/**
7466 * VM-exit handler for instructions that result in a \#UD exception delivered to
7467 * the guest.
7468 */
7469HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7470{
7471 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7472 vmxHCSetPendingXcptUD(pVCpu);
7473 return VINF_SUCCESS;
7474}
7475
7476
7477/**
7478 * VM-exit handler for expiry of the VMX-preemption timer.
7479 */
7480HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7481{
7482 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7483
7484 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7485 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7486 Log12(("vmxHCExitPreemptTimer:\n"));
7487
7488 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7489 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7490 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7491 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7492 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7493}
7494
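/*
 * Background sketch for the handler above: the VMX-preemption timer counts down at the
 * TSC rate shifted right by the value in bits 4:0 of IA32_VMX_MISC, so arming it from a
 * TSC-tick deadline looks roughly like this (cTscTicksToDeadline and cPreemptTimerShift
 * are illustrative names, not fields of this file):
 *
 * @code
 *     uint32_t const uPreemptTimer = (uint32_t)RT_MIN(cTscTicksToDeadline >> cPreemptTimerShift, UINT32_MAX);
 * @endcode
 *
 * Hence an expiry simply forces the offset and timer value to be recomputed on the next
 * VM-entry.
 */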
7495
7496/**
7497 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7498 */
7499HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7500{
7501 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7502
7503 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7504 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7505 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7506 AssertRCReturn(rc, rc);
7507
7508 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7509 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7510 : HM_CHANGED_RAISED_XCPT_MASK);
7511
7512#ifndef IN_NEM_DARWIN
7513 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7514 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7515 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7516 {
7517 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7518 hmR0VmxUpdateStartVmFunction(pVCpu);
7519 }
7520#endif
7521
7522 return rcStrict;
7523}
7524
7525
7526/**
7527 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7528 */
7529HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7530{
7531 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7532
7533 /** @todo Enable the new code after finding a reliable guest test-case. */
7534#if 1
7535 return VERR_EM_INTERPRETER;
7536#else
7537 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7538 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7539 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7540 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7541 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7542 AssertRCReturn(rc, rc);
7543
7544 /* Paranoia. Ensure this has a memory operand. */
7545 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7546
7547 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7548 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7549 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7550 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7551
7552 RTGCPTR GCPtrDesc;
7553 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7554
7555 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7556 GCPtrDesc, uType);
7557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7558 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7559 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7560 {
7561 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7562 rcStrict = VINF_SUCCESS;
7563 }
7564 return rcStrict;
7565#endif
7566}
7567
7568
7569/**
7570 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7571 * VM-exit.
7572 */
7573HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7574{
7575 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7576 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7577 AssertRCReturn(rc, rc);
7578
7579 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7580 if (RT_FAILURE(rc))
7581 return rc;
7582
7583 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7584 NOREF(uInvalidReason);
7585
7586#ifdef VBOX_STRICT
7587 uint32_t fIntrState;
7588 uint64_t u64Val;
7589 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7590 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7591 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7592
7593 Log4(("uInvalidReason %u\n", uInvalidReason));
7594 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7595 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7596 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7597
7598 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7599 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7600 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7601 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7602 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7603 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7604 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7605 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7606 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7607 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7608 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7609 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7610# ifndef IN_NEM_DARWIN
7611 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7612 {
7613 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7614 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7615 }
7616
7617 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7618# endif
7619#endif
7620
7621 return VERR_VMX_INVALID_GUEST_STATE;
7622}
7623
7624/**
7625 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7626 */
7627HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7628{
7629 /*
7630 * Cumulative notes of all recognized but unexpected VM-exits.
7631 *
7632 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7633 * nested-paging is used.
7634 *
7635 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7636 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7637 * this function (and thereby stopping VM execution) for handling such instructions.
7638 *
7639 *
7640 * VMX_EXIT_INIT_SIGNAL:
7641 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7642 * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get
7643 * these VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7644 *
7645 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7646 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7647 * See Intel spec. "23.8 Restrictions on VMX operation".
7648 *
7649 * VMX_EXIT_SIPI:
7650 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7651 * activity state is used. We don't make use of it as our guests don't have direct
7652 * access to the host local APIC.
7653 *
7654 * See Intel spec. 25.3 "Other Causes of VM-exits".
7655 *
7656 * VMX_EXIT_IO_SMI:
7657 * VMX_EXIT_SMI:
7658 * This can only happen if we support dual-monitor treatment of SMI, which can be
7659 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7660 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7661 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7662 *
7663 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7664 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7665 *
7666 * VMX_EXIT_ERR_MSR_LOAD:
7667 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7668 * and typically indicate a bug in the hypervisor code. We thus cannot resume
7669 * execution.
7670 *
7671 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7672 *
7673 * VMX_EXIT_ERR_MACHINE_CHECK:
7674 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7675 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7676 * abort-class #MC exception is raised. We thus cannot assume a
7677 * reasonable chance of continuing any sort of execution and we bail.
7678 *
7679 * See Intel spec. 15.1 "Machine-check Architecture".
7680 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7681 *
7682 * VMX_EXIT_PML_FULL:
7683 * VMX_EXIT_VIRTUALIZED_EOI:
7684 * VMX_EXIT_APIC_WRITE:
7685 * We do not currently support any of these features and thus they are all unexpected
7686 * VM-exits.
7687 *
7688 * VMX_EXIT_GDTR_IDTR_ACCESS:
7689 * VMX_EXIT_LDTR_TR_ACCESS:
7690 * VMX_EXIT_RDRAND:
7691 * VMX_EXIT_RSM:
7692 * VMX_EXIT_VMFUNC:
7693 * VMX_EXIT_ENCLS:
7694 * VMX_EXIT_RDSEED:
7695 * VMX_EXIT_XSAVES:
7696 * VMX_EXIT_XRSTORS:
7697 * VMX_EXIT_UMWAIT:
7698 * VMX_EXIT_TPAUSE:
7699 * VMX_EXIT_LOADIWKEY:
7700 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7701 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7702 * an unsupported CPU mode (like SMM) or potentially corrupt VMCS controls.
7703 *
7704 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7705 */
7706 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7707 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7708 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7709}
7710
7711
7712/**
7713 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7714 */
7715HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7716{
7717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7718
7719 /** @todo Optimize this: We currently drag in the whole MSR state
7720 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7721 * MSRs required. That would require changes to IEM and possibly CPUM too.
7722 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7723 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7724 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7725 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7726 switch (idMsr)
7727 {
7728 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7729 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7730 }
7731
7732 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7733 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7734 AssertRCReturn(rc, rc);
7735
7736 Log4Func(("ecx=%#RX32\n", idMsr));
7737
7738#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7739 Assert(!pVmxTransient->fIsNestedGuest);
7740 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7741 {
7742 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7743 && idMsr != MSR_K6_EFER)
7744 {
7745 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7746 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7747 }
7748 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7749 {
7750 Assert(pVmcsInfo->pvMsrBitmap);
7751 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7752 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7753 {
7754 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7755 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7756 }
7757 }
7758 }
7759#endif
7760
7761 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7762 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7763 if (rcStrict == VINF_SUCCESS)
7764 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7765 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7766 {
7767 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7768 rcStrict = VINF_SUCCESS;
7769 }
7770 else
7771 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7772 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7773
7774 return rcStrict;
7775}
7776
7777
7778/**
7779 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7780 */
7781HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7782{
7783 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7784
7785 /** @todo Optimize this: We currently drag in the whole MSR state
7786 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7787 * MSRs required. That would require changes to IEM and possibly CPUM too.
7788 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7789 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7790 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7791
7792 /*
7793 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7794 * Although we don't need to fetch the base (it will be overwritten shortly), when
7795 * loading the guest state we would also load the entire segment register, including
7796 * its limit and attributes, and thus we need to import them here.
7797 */
7798 switch (idMsr)
7799 {
7800 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7801 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7802 }
7803
7804 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7805 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7806 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7807 AssertRCReturn(rc, rc);
7808
7809 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7810
7811 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7812 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7813
7814 if (rcStrict == VINF_SUCCESS)
7815 {
7816 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7817
7818 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7819 if ( idMsr == MSR_IA32_APICBASE
7820 || ( idMsr >= MSR_IA32_X2APIC_START
7821 && idMsr <= MSR_IA32_X2APIC_END))
7822 {
7823 /*
7824 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7825 * When full APIC register virtualization is implemented we'll have to make
7826 * sure APIC state is saved from the VMCS before IEM changes it.
7827 */
7828 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7829 }
7830 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7831 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7832 else if (idMsr == MSR_K6_EFER)
7833 {
7834 /*
7835 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7836 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7837 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7838 */
7839 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7840 }
7841
7842 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7843 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7844 {
7845 switch (idMsr)
7846 {
7847 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7848 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7849 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7850 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7851 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7852 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7853 default:
7854 {
7855#ifndef IN_NEM_DARWIN
7856 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7858 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7859 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7860#else
7861 AssertMsgFailed(("TODO\n"));
7862#endif
7863 break;
7864 }
7865 }
7866 }
7867#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7868 else
7869 {
7870 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7871 switch (idMsr)
7872 {
7873 case MSR_IA32_SYSENTER_CS:
7874 case MSR_IA32_SYSENTER_EIP:
7875 case MSR_IA32_SYSENTER_ESP:
7876 case MSR_K8_FS_BASE:
7877 case MSR_K8_GS_BASE:
7878 {
7879 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7880 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7881 }
7882
7883 /* Writes to MSRs in the auto-load/store area or to swapped (lazy-restore) MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
7884 default:
7885 {
7886 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7887 {
7888 /* EFER MSR writes are always intercepted. */
7889 if (idMsr != MSR_K6_EFER)
7890 {
7891 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7892 idMsr));
7893 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7894 }
7895 }
7896
7897 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7898 {
7899 Assert(pVmcsInfo->pvMsrBitmap);
7900 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7901 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7902 {
7903 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7904 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7905 }
7906 }
7907 break;
7908 }
7909 }
7910 }
7911#endif /* VBOX_STRICT */
7912 }
7913 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7914 {
7915 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7916 rcStrict = VINF_SUCCESS;
7917 }
7918 else
7919 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7920 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7921
7922 return rcStrict;
7923}
7924
7925
7926/**
7927 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7928 */
7929HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7930{
7931 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7932
7933 /** @todo The guest has likely hit a contended spinlock. We might want to
7934 * poke or schedule a different guest VCPU. */
7935 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7936 if (RT_SUCCESS(rc))
7937 return VINF_EM_RAW_INTERRUPT;
7938
7939 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7940 return rc;
7941}
7942
7943
7944/**
7945 * VM-exit handler for when the TPR value is lowered below the specified
7946 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7947 */
7948HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7949{
7950 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7951 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7952
7953 /*
7954 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7955 * We'll re-evaluate pending interrupts and inject them before the next VM
7956 * entry so we can just continue execution here.
7957 */
7958 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7959 return VINF_SUCCESS;
7960}
7961
7962
7963/**
7964 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7965 * VM-exit.
7966 *
7967 * @retval VINF_SUCCESS when guest execution can continue.
7968 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7969 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7970 * incompatible guest state for VMX execution (real-on-v86 case).
7971 */
7972HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7973{
7974 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7975 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7976
7977 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7978 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7979 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7980
7981 VBOXSTRICTRC rcStrict;
7982 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7983 uint64_t const uExitQual = pVmxTransient->uExitQual;
7984 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
7985 switch (uAccessType)
7986 {
7987 /*
7988 * MOV to CRx.
7989 */
7990 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
7991 {
7992 /*
7993 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
7994 * changes certain bits in CR0 or CR4 (and not just CR3). We are currently fine
7995 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
7996 * PAE PDPTEs as well.
7997 */
7998 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7999 AssertRCReturn(rc, rc);
8000
8001 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8002#ifndef IN_NEM_DARWIN
8003 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8004#endif
8005 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8006 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8007
8008 /*
8009 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8010 * - When nested paging isn't used.
8011 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8012 * - We are executing in the VM debug loop.
8013 */
8014#ifndef IN_NEM_DARWIN
8015 Assert( iCrReg != 3
8016 || !VM_IS_VMX_NESTED_PAGING(pVM)
8017 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8018 || pVCpu->hmr0.s.fUsingDebugLoop);
8019#else
8020 Assert( iCrReg != 3
8021 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8022#endif
8023
8024 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8025 Assert( iCrReg != 8
8026 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8027
8028 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8029 AssertMsg( rcStrict == VINF_SUCCESS
8030 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8031
8032#ifndef IN_NEM_DARWIN
8033 /*
8034 * This is a kludge for handling switches back to real mode when we try to use
8035 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8036 * deal with special selector values, so we have to return to ring-3 and run
8037 * there till the selector values are V86 mode compatible.
8038 *
8039 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8040 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8041 * this function.
8042 */
8043 if ( iCrReg == 0
8044 && rcStrict == VINF_SUCCESS
8045 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8046 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8047 && (uOldCr0 & X86_CR0_PE)
8048 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8049 {
8050 /** @todo Check selectors rather than returning all the time. */
8051 Assert(!pVmxTransient->fIsNestedGuest);
8052 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8053 rcStrict = VINF_EM_RESCHEDULE_REM;
8054 }
8055#endif
8056
8057 break;
8058 }
8059
8060 /*
8061 * MOV from CRx.
8062 */
8063 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8064 {
8065 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8066 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8067
8068 /*
8069 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8070 * - When nested paging isn't used.
8071 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8072 * - We are executing in the VM debug loop.
8073 */
8074#ifndef IN_NEM_DARWIN
8075 Assert( iCrReg != 3
8076 || !VM_IS_VMX_NESTED_PAGING(pVM)
8077 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8078 || pVCpu->hmr0.s.fLeaveDone);
8079#else
8080 Assert( iCrReg != 3
8081 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8082#endif
8083
8084 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8085 Assert( iCrReg != 8
8086 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8087
8088 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8089 break;
8090 }
8091
8092 /*
8093 * CLTS (Clear Task-Switch Flag in CR0).
8094 */
8095 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8096 {
8097 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8098 break;
8099 }
8100
8101 /*
8102 * LMSW (Load Machine-Status Word into CR0).
8103 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8104 */
8105 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8106 {
8107 RTGCPTR GCPtrEffDst;
8108 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8109 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8110 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8111 if (fMemOperand)
8112 {
8113 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8114 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8115 }
8116 else
8117 GCPtrEffDst = NIL_RTGCPTR;
8118 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8119 break;
8120 }
8121
8122 default:
8123 {
8124 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8125 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8126 }
8127 }
8128
8129 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8130 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8131 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8132
8133 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8134 NOREF(pVM);
8135 return rcStrict;
8136}
8137
8138
8139/**
8140 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8141 * VM-exit.
8142 */
8143HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8144{
8145 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8146 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8147
8148 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8149 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8150 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8151 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8152 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8153 | CPUMCTX_EXTRN_EFER);
8154 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8155 AssertRCReturn(rc, rc);
8156
8157 /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8158 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8159 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8160 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8161 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8162 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8163 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8164 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8165
8166 /*
8167 * Update exit history to see if this exit can be optimized.
8168 */
8169 VBOXSTRICTRC rcStrict;
8170 PCEMEXITREC pExitRec = NULL;
8171 if ( !fGstStepping
8172 && !fDbgStepping)
8173 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8174 !fIOString
8175 ? !fIOWrite
8176 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8177 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8178 : !fIOWrite
8179 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8180 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8181 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8182 if (!pExitRec)
8183 {
8184 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8185 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
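        /*
         * Worked example (illustration only): a guest "out dx, ax" reports size
         * encoding 1 in the exit qualification, so:
         *
         * @code
         *     uint32_t const cbValue = s_aIOSizes[1];   // 2 bytes
         *     uint32_t const uAndVal = s_aIOOpAnd[1];   // 0xffff, i.e. only AX participates
         *     // For an IN, the result is merged back below as:
         *     //     pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
         * @endcode
         *
         * Size encoding 2 is not defined by the spec, hence the zero placeholders.
         */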
8186
8187 uint32_t const cbValue = s_aIOSizes[uIOSize];
8188 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8189 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8190 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8191 if (fIOString)
8192 {
8193 /*
8194 * INS/OUTS - I/O String instruction.
8195 *
8196 * Use instruction-information if available, otherwise fall back on
8197 * interpreting the instruction.
8198 */
8199 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8200 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8201 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8202 if (fInsOutsInfo)
8203 {
8204 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8205 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8206 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8207 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8208 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8209 if (fIOWrite)
8210 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8211 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8212 else
8213 {
8214 /*
8215 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8216 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8217 * See Intel Instruction spec. for "INS".
8218 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8219 */
8220 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8221 }
8222 }
8223 else
8224 rcStrict = IEMExecOne(pVCpu);
8225
8226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8227 fUpdateRipAlready = true;
8228 }
8229 else
8230 {
8231 /*
8232 * IN/OUT - I/O instruction.
8233 */
8234 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8235 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8236 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8237 if (fIOWrite)
8238 {
8239 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8240 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8241#ifndef IN_NEM_DARWIN
8242 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8243 && !pCtx->eflags.Bits.u1TF)
8244 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8245#endif
8246 }
8247 else
8248 {
8249 uint32_t u32Result = 0;
8250 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8251 if (IOM_SUCCESS(rcStrict))
8252 {
8253 /* Save result of I/O IN instr. in AL/AX/EAX. */
8254 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8255 }
8256#ifndef IN_NEM_DARWIN
8257 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8258 && !pCtx->eflags.Bits.u1TF)
8259 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8260#endif
8261 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8262 }
8263 }
8264
8265 if (IOM_SUCCESS(rcStrict))
8266 {
8267 if (!fUpdateRipAlready)
8268 {
8269 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8270 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8271 }
8272
8273 /*
8274 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8275 * guru meditation while booting a Fedora 17 64-bit guest.
8276 *
8277 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8278 */
8279 if (fIOString)
8280 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8281
8282 /*
8283 * If any I/O breakpoints are armed, we need to check if one triggered
8284 * and take appropriate action.
8285 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8286 */
8287 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8288 AssertRCReturn(rc, rc);
8289
8290 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8291 * execution engines about whether hyper BPs and such are pending. */
8292 uint32_t const uDr7 = pCtx->dr[7];
8293 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8294 && X86_DR7_ANY_RW_IO(uDr7)
8295 && (pCtx->cr4 & X86_CR4_DE))
8296 || DBGFBpIsHwIoArmed(pVM)))
8297 {
8298 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8299
8300#ifndef IN_NEM_DARWIN
8301 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8302 VMMRZCallRing3Disable(pVCpu);
8303 HM_DISABLE_PREEMPT(pVCpu);
8304
8305 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8306
8307 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8308 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8309 {
8310 /* Raise #DB. */
8311 if (fIsGuestDbgActive)
8312 ASMSetDR6(pCtx->dr[6]);
8313 if (pCtx->dr[7] != uDr7)
8314 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8315
8316 vmxHCSetPendingXcptDB(pVCpu);
8317 }
8318 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8319 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8320 else if ( rcStrict2 != VINF_SUCCESS
8321 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8322 rcStrict = rcStrict2;
8323 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8324
8325 HM_RESTORE_PREEMPT();
8326 VMMRZCallRing3Enable(pVCpu);
8327#else
8328 /** @todo */
8329#endif
8330 }
8331 }
8332
8333#ifdef VBOX_STRICT
8334 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8335 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8336 Assert(!fIOWrite);
8337 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8338 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8339 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8340 Assert(fIOWrite);
8341 else
8342 {
8343# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8344 * statuses, that the VMM device and some others may return. See
8345 * IOM_SUCCESS() for guidance. */
8346 AssertMsg( RT_FAILURE(rcStrict)
8347 || rcStrict == VINF_SUCCESS
8348 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8349 || rcStrict == VINF_EM_DBG_BREAKPOINT
8350 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8351 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8352# endif
8353 }
8354#endif
8355 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8356 }
8357 else
8358 {
8359 /*
8360 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8361 */
8362 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8363 AssertRCReturn(rc2, rc2);
8364 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8365 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8366 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8367 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8368 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8369 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8370
8371 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8373
8374 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8375 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8376 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8377 }
8378 return rcStrict;
8379}
8380
8381
8382/**
8383 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8384 * VM-exit.
8385 */
8386HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8387{
8388 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8389
8390 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8391 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8392 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8393 {
8394 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8395 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8396 {
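            /*
             * Layout refresher (Intel IDT-vectoring information format): bits 7:0 hold
             * the vector, bits 10:8 the type, bit 11 the error-code-valid flag and
             * bit 31 the valid flag; that is what the VMX_IDT_VECTORING_INFO_* macros
             * used below pick apart. For example (illustrative value):
             *
             * @code
             *     // 0x80000b0d = valid | errcode-valid | type 3 (HW xcpt) | vector 13 (#GP)
             *     Assert(VMX_IDT_VECTORING_INFO_IS_VALID(UINT32_C(0x80000b0d)));
             *     Assert(VMX_IDT_VECTORING_INFO_VECTOR(UINT32_C(0x80000b0d)) == X86_XCPT_GP);
             * @endcode
             */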
8397 uint32_t uErrCode;
8398 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8399 {
8400 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8401 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8402 }
8403 else
8404 uErrCode = 0;
8405
8406 RTGCUINTPTR GCPtrFaultAddress;
8407 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8408 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8409 else
8410 GCPtrFaultAddress = 0;
8411
8412 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8413
8414 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8415 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8416
8417 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8418 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8419 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8420 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8421 }
8422 }
8423
8424 /* Fall back to the interpreter to emulate the task-switch. */
8425 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8426 return VERR_EM_INTERPRETER;
8427}
8428
8429
8430/**
8431 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8432 */
8433HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8434{
8435 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8436
8437 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8438 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8439 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8440 AssertRC(rc);
8441 return VINF_EM_DBG_STEPPED;
8442}
8443
8444
8445/**
8446 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8447 */
8448HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8449{
8450 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8451 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8452
8453 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8454 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8455 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8456 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8457 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8458
8459 /*
8460 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8461 */
8462 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8463 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8464 {
8465 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8466 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8467 {
8468 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8469 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8470 }
8471 }
8472 else
8473 {
8474 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8475 return rcStrict;
8476 }
8477
8478 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8479 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8480 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8481 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8482 AssertRCReturn(rc, rc);
8483
8484 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8485 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8486 switch (uAccessType)
8487 {
8488#ifndef IN_NEM_DARWIN
8489 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8490 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8491 {
8492 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8493 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8494 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8495
8496 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8497 GCPhys &= PAGE_BASE_GC_MASK;
8498 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8499 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8500 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8501
8502 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8503 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8504 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8505 if ( rcStrict == VINF_SUCCESS
8506 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8507 || rcStrict == VERR_PAGE_NOT_PRESENT)
8508 {
8509 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8510 | HM_CHANGED_GUEST_APIC_TPR);
8511 rcStrict = VINF_SUCCESS;
8512 }
8513 break;
8514 }
8515#else
8516 /** @todo */
8517#endif
8518
8519 default:
8520 {
8521 Log4Func(("uAccessType=%#x\n", uAccessType));
8522 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8523 break;
8524 }
8525 }
8526
8527 if (rcStrict != VINF_SUCCESS)
8528 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8529 return rcStrict;
8530}
8531
8532
8533/**
8534 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8535 * VM-exit.
8536 */
8537HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8538{
8539 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8540 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8541
8542 /*
8543 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8544 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8545 * must emulate the MOV DRx access.
8546 */
8547 if (!pVmxTransient->fIsNestedGuest)
8548 {
8549 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8550 if (pVmxTransient->fWasGuestDebugStateActive)
8551 {
8552 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8553 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8554 }
8555
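        /* Neither single-stepping nor the hypervisor debug state needs the intercepts: stop trapping MOV DRx and switch to the guest debug state so further accesses run without VM-exits. */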
8556 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8557 && !pVmxTransient->fWasHyperDebugStateActive)
8558 {
8559 Assert(!DBGFIsStepping(pVCpu));
8560 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8561
8562 /* Don't intercept MOV DRx any more. */
8563 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8564 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8565 AssertRC(rc);
8566
8567#ifndef IN_NEM_DARWIN
8568 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8569 VMMRZCallRing3Disable(pVCpu);
8570 HM_DISABLE_PREEMPT(pVCpu);
8571
8572 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8573 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8574 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8575
8576 HM_RESTORE_PREEMPT();
8577 VMMRZCallRing3Enable(pVCpu);
8578#else
8579 /** @todo */
8580#endif
8581
8582#ifdef VBOX_WITH_STATISTICS
8583 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8584 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8585 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8586 else
8587 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8588#endif
8589 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8590 return VINF_SUCCESS;
8591 }
8592 }
8593
8594 /*
8595 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER MSR, CS.
8596 * The EFER MSR is always up-to-date.
8597 * Update the segment registers and DR7 from the CPU.
8598 */
8599 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8600 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8601 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8602 AssertRCReturn(rc, rc);
8603 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8604
8605 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8606 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8607 {
8608 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8609 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8610 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8611 if (RT_SUCCESS(rc))
8612 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8613 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8614 }
8615 else
8616 {
8617 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8618 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8619 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8620 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8621 }
8622
8623 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8624 if (RT_SUCCESS(rc))
8625 {
8626 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8627 AssertRCReturn(rc2, rc2);
8628 return VINF_SUCCESS;
8629 }
8630 return rc;
8631}
8632
8633
8634/**
8635 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8636 * Conditional VM-exit.
8637 */
8638HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8639{
8640 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8641
8642#ifndef IN_NEM_DARWIN
8643 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8644
8645 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8646 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8647 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8648 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8649 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8650
8651 /*
8652 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8653 */
8654 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8655 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8656 {
8657 /*
8658 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8659 * instruction emulation to inject the original event. Otherwise, injecting the original event
8660 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8661 */
8662 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8663 { /* likely */ }
8664 else
8665 {
8666 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8667#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8668 /** @todo NSTVMX: Think about how this should be handled. */
8669 if (pVmxTransient->fIsNestedGuest)
8670 return VERR_VMX_IPE_3;
8671#endif
8672 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8673 }
8674 }
8675 else
8676 {
8677 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8678 return rcStrict;
8679 }
8680
8681 /*
8682 * Get sufficient state and update the exit history entry.
8683 */
8684 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8685 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8686 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8687 AssertRCReturn(rc, rc);
8688
8689 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
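    /* Consult the exit history: frequently hit MMIO addresses are re-executed via EMHistoryExec below instead of taking the normal PGM/IOM path. */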
8690 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8691 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8692 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8693 if (!pExitRec)
8694 {
8695 /*
8696 * If we succeed, resume guest execution.
8697     * If we fail to interpret the instruction because we couldn't get the guest-physical address
8698     * of the page containing the instruction via the guest's page tables (we would invalidate the
8699     * guest page in the host TLB), resume execution; the resulting guest page fault lets the guest
8700     * handle this weird case itself. See @bugref{6043}.
8701 */
8702 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8703 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8704/** @todo bird: We can probably just go straight to IOM here and assume that
8705 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8706 * well.  However, we need to address the aliasing workarounds that
8707 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8708 *
8709 * Might also be interesting to see if we can get this done more or
8710 * less locklessly inside IOM. Need to consider the lookup table
8711 * updating and use a bit more carefully first (or do all updates via
8712 * rendezvous) */
8713 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8714 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8715 if ( rcStrict == VINF_SUCCESS
8716 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8717 || rcStrict == VERR_PAGE_NOT_PRESENT)
8718 {
8719 /* Successfully handled MMIO operation. */
8720 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8721 | HM_CHANGED_GUEST_APIC_TPR);
8722 rcStrict = VINF_SUCCESS;
8723 }
8724 }
8725 else
8726 {
8727 /*
8728 * Frequent exit or something needing probing. Call EMHistoryExec.
8729 */
8730        Log4(("EptMisconfigExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8731 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8732
8733 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8734 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8735
8736        Log4(("EptMisconfigExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8737 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8738 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8739 }
8740 return rcStrict;
8741#else
8742 AssertFailed();
8743 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8744#endif
8745}
8746
8747
8748/**
8749 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8750 * VM-exit.
8751 */
8752HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8753{
8754 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8755#ifndef IN_NEM_DARWIN
8756 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8757
8758 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8759 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8760 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8761 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8762 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8763 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8764
8765 /*
8766 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8767 */
8768 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8769 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8770 {
8771 /*
8772 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8773 * we shall resolve the nested #PF and re-inject the original event.
8774 */
8775 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8776 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8777 }
8778 else
8779 {
8780 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8781 return rcStrict;
8782 }
8783
8784 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8785 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8786 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8787 AssertRCReturn(rc, rc);
8788
8789 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8790 uint64_t const uExitQual = pVmxTransient->uExitQual;
8791 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8792
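    /* Translate the EPT-violation exit qualification into an x86 page-fault error code for TRPM/PGM. */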
8793 RTGCUINT uErrorCode = 0;
8794 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8795 uErrorCode |= X86_TRAP_PF_ID;
8796 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8797 uErrorCode |= X86_TRAP_PF_RW;
8798 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8799 uErrorCode |= X86_TRAP_PF_P;
8800
8801 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8802 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8803
8804 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8805
8806 /*
8807 * Handle the pagefault trap for the nested shadow table.
8808 */
8809 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8810 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8811 TRPMResetTrap(pVCpu);
8812
8813 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8814 if ( rcStrict == VINF_SUCCESS
8815 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8816 || rcStrict == VERR_PAGE_NOT_PRESENT)
8817 {
8818 /* Successfully synced our nested page tables. */
8819 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8820 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8821 return VINF_SUCCESS;
8822 }
8823#else
8824 PVM pVM = pVCpu->CTX_SUFF(pVM);
8825 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8826 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8827 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8828 vmxHCImportGuestRip(pVCpu);
8829 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8830
8831 /*
8832 * Ask PGM for information about the given GCPhys. We need to check if we're
8833 * out of sync first.
8834 */
8835 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8836 PGMPHYSNEMPAGEINFO Info;
8837 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8838 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8839 if (RT_SUCCESS(rc))
8840 {
8841 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8842 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8843 {
8844 if (State.fCanResume)
8845 {
8846 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8847 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8848 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8849 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8850 State.fDidSomething ? "" : " no-change"));
8851 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8852 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8853 return VINF_SUCCESS;
8854 }
8855 }
8856
8857 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8858 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8859 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8860 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8861 State.fDidSomething ? "" : " no-change"));
8862 }
8863 else
8864 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8865 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8866 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8867
8868 /*
8869 * Emulate the memory access, either access handler or special memory.
8870 */
8871 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8872 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8873 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8874 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8875 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8876
8877 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8878 AssertRCReturn(rc, rc);
8879
8880 VBOXSTRICTRC rcStrict;
8881 if (!pExitRec)
8882 rcStrict = IEMExecOne(pVCpu);
8883 else
8884 {
8885 /* Frequent access or probing. */
8886 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8887 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8888 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8889 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8890 }
8891
8892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8893#endif
8894
8895    Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8896 return rcStrict;
8897}
8898
8899
8900#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8901/**
8902 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8903 */
8904HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8905{
8906 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8907
8908 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8909 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8910 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8911 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8912 | CPUMCTX_EXTRN_HWVIRT
8913 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8914 AssertRCReturn(rc, rc);
8915
8916 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8917
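    /* Package the VM-exit information (reason, qualification, instruction info/length and the decoded memory operand) and let IEM emulate the VMCLEAR. */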
8918 VMXVEXITINFO ExitInfo;
8919 RT_ZERO(ExitInfo);
8920 ExitInfo.uReason = pVmxTransient->uExitReason;
8921 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8922 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8923 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8924 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8925
8926 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8927 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8928 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8929 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8930 {
8931 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8932 rcStrict = VINF_SUCCESS;
8933 }
8934 return rcStrict;
8935}
8936
8937
8938/**
8939 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8940 */
8941HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8942{
8943 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8944
8945 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
8946 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8947 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8948 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8949 AssertRCReturn(rc, rc);
8950
8951 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8952
8953 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8954 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8955 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8956 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8957 {
8958 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8959 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8960 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8961 }
8962 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8963 return rcStrict;
8964}
8965
8966
8967/**
8968 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8969 */
8970HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8971{
8972 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8973
8974 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8975 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8976 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8977 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8978 | CPUMCTX_EXTRN_HWVIRT
8979 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8980 AssertRCReturn(rc, rc);
8981
8982 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8983
8984 VMXVEXITINFO ExitInfo;
8985 RT_ZERO(ExitInfo);
8986 ExitInfo.uReason = pVmxTransient->uExitReason;
8987 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8988 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8989 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8990 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8991
8992 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
8993 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8994 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8995 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8996 {
8997 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8998 rcStrict = VINF_SUCCESS;
8999 }
9000 return rcStrict;
9001}
9002
9003
9004/**
9005 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9006 */
9007HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9008{
9009 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9010
9011 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9012 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9013 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9014 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9015 | CPUMCTX_EXTRN_HWVIRT
9016 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9017 AssertRCReturn(rc, rc);
9018
9019 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9020
9021 VMXVEXITINFO ExitInfo;
9022 RT_ZERO(ExitInfo);
9023 ExitInfo.uReason = pVmxTransient->uExitReason;
9024 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9025 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9026 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9027 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9028
9029 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9030 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9031 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9032 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9033 {
9034 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9035 rcStrict = VINF_SUCCESS;
9036 }
9037 return rcStrict;
9038}
9039
9040
9041/**
9042 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9043 */
9044HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9045{
9046 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9047
9048 /*
9049     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9050     * thus might not need to import the shadow VMCS state, but it's safer to do so in
9051     * case code elsewhere dares look at unsynced VMCS fields.
9052 */
9053 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9054 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9055 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9056 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9057 | CPUMCTX_EXTRN_HWVIRT
9058 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9059 AssertRCReturn(rc, rc);
9060
9061 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9062
9063 VMXVEXITINFO ExitInfo;
9064 RT_ZERO(ExitInfo);
9065 ExitInfo.uReason = pVmxTransient->uExitReason;
9066 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9067 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9068 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9069 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9070 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9071
9072 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9073 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9074 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9075 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9076 {
9077 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9078 rcStrict = VINF_SUCCESS;
9079 }
9080 return rcStrict;
9081}
9082
9083
9084/**
9085 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9086 */
9087HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9088{
9089 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9090
9091 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9092 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9093 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9094 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9095 AssertRCReturn(rc, rc);
9096
9097 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9098
9099 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9100 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9101 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9102 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9103 {
9104 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9105 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9106 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9107 }
9108 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9109 return rcStrict;
9110}
9111
9112
9113/**
9114 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9115 */
9116HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9117{
9118 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9119
9120 /*
9121 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9122 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9123 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9124 */
9125 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9126 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9127 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9128 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9129 | CPUMCTX_EXTRN_HWVIRT
9130 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9131 AssertRCReturn(rc, rc);
9132
9133 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9134
9135 VMXVEXITINFO ExitInfo;
9136 RT_ZERO(ExitInfo);
9137 ExitInfo.uReason = pVmxTransient->uExitReason;
9138 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9139 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9140 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9141 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9142 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9143
9144 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9145 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9146 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9147 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9148 {
9149 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9150 rcStrict = VINF_SUCCESS;
9151 }
9152 return rcStrict;
9153}
9154
9155
9156/**
9157 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9158 */
9159HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9160{
9161 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9162
9163 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9164 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9165 | CPUMCTX_EXTRN_HWVIRT
9166 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9167 AssertRCReturn(rc, rc);
9168
9169 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9170
9171 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9172 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9173 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9174 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9175 {
9176 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9177 rcStrict = VINF_SUCCESS;
9178 }
9179 return rcStrict;
9180}
9181
9182
9183/**
9184 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9185 */
9186HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9187{
9188 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9189
9190 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9191 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9192 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9193 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9194 | CPUMCTX_EXTRN_HWVIRT
9195 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9196 AssertRCReturn(rc, rc);
9197
9198 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9199
9200 VMXVEXITINFO ExitInfo;
9201 RT_ZERO(ExitInfo);
9202 ExitInfo.uReason = pVmxTransient->uExitReason;
9203 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9204 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9205 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9206 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9207
9208 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9209 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9210 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9211 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9212 {
9213 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9214 rcStrict = VINF_SUCCESS;
9215 }
9216 return rcStrict;
9217}
9218
9219
9220/**
9221 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9222 */
9223HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9224{
9225 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9226
9227 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9228 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9229 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9230 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9231 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9232 AssertRCReturn(rc, rc);
9233
9234 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9235
9236 VMXVEXITINFO ExitInfo;
9237 RT_ZERO(ExitInfo);
9238 ExitInfo.uReason = pVmxTransient->uExitReason;
9239 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9240 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9241 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9242 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9243
9244 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9245 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9246 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9247 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9248 {
9249 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9250 rcStrict = VINF_SUCCESS;
9251 }
9252 return rcStrict;
9253}
9254#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9255/** @} */
9256
9257
9258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9259/** @name Nested-guest VM-exit handlers.
9260 * @{
9261 */
9262/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9263/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9264/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9265
9266/**
9267 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9268 * Conditional VM-exit.
9269 */
9270HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9271{
9272 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9273
9274 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9275
9276 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9277 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9278 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9279
9280 switch (uExitIntType)
9281 {
9282#ifndef IN_NEM_DARWIN
9283 /*
9284 * Physical NMIs:
9285         * We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
9286 */
9287 case VMX_EXIT_INT_INFO_TYPE_NMI:
9288 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9289#endif
9290
9291 /*
9292 * Hardware exceptions,
9293 * Software exceptions,
9294 * Privileged software exceptions:
9295 * Figure out if the exception must be delivered to the guest or the nested-guest.
9296 */
9297 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9298 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9299 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9300 {
9301 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9302 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9303 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9304 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9305
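            /* Ask CPUM whether the nested hypervisor intercepts this exception (exception bitmap and, for #PF, the error-code mask/match). */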
9306 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9307 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9308 pVmxTransient->uExitIntErrorCode);
9309 if (fIntercept)
9310 {
9311 /* Exit qualification is required for debug and page-fault exceptions. */
9312 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9313
9314 /*
9315 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9316 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9317 * length. However, if delivery of a software interrupt, software exception or privileged
9318 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9319 */
9320 VMXVEXITINFO ExitInfo;
9321 RT_ZERO(ExitInfo);
9322 ExitInfo.uReason = pVmxTransient->uExitReason;
9323 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9324 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9325
9326 VMXVEXITEVENTINFO ExitEventInfo;
9327 RT_ZERO(ExitEventInfo);
9328 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9329 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9330 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9331 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9332
9333#ifdef DEBUG_ramshankar
9334 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9335 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9336 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9337 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9338 {
9339 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9340 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9341 }
9342#endif
9343 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9344 }
9345
9346 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9347 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9348 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9349 }
9350
9351 /*
9352 * Software interrupts:
9353 * VM-exits cannot be caused by software interrupts.
9354 *
9355 * External interrupts:
9356 * This should only happen when "acknowledge external interrupts on VM-exit"
9357 * control is set. However, we never set this when executing a guest or
9358 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9359 * the guest.
9360 */
9361 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9362 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9363 default:
9364 {
9365 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9366 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9367 }
9368 }
9369}
9370
9371
9372/**
9373 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9374 * Unconditional VM-exit.
9375 */
9376HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9377{
9378 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9379 return IEMExecVmxVmexitTripleFault(pVCpu);
9380}
9381
9382
9383/**
9384 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9385 */
9386HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9387{
9388 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9389
9390 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9391 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9392 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9393}
9394
9395
9396/**
9397 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9398 */
9399HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9400{
9401 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9402
9403 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9404 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9405 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9406}
9407
9408
9409/**
9410 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9411 * Unconditional VM-exit.
9412 */
9413HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9414{
9415 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9416
9417 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9418 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9419 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9420 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9421
9422 VMXVEXITINFO ExitInfo;
9423 RT_ZERO(ExitInfo);
9424 ExitInfo.uReason = pVmxTransient->uExitReason;
9425 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9426 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9427
9428 VMXVEXITEVENTINFO ExitEventInfo;
9429 RT_ZERO(ExitEventInfo);
9430 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9431 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9432 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9433}
9434
9435
9436/**
9437 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9438 */
9439HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9440{
9441 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9442
9443 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9444 {
9445 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9446 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9447 }
9448 return vmxHCExitHlt(pVCpu, pVmxTransient);
9449}
9450
9451
9452/**
9453 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9454 */
9455HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9456{
9457 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9458
9459 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9460 {
9461 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9462 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9463
9464 VMXVEXITINFO ExitInfo;
9465 RT_ZERO(ExitInfo);
9466 ExitInfo.uReason = pVmxTransient->uExitReason;
9467 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9468 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9469 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9470 }
9471 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9472}
9473
9474
9475/**
9476 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9477 */
9478HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9479{
9480 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9481
9482 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9483 {
9484 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9485 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9486 }
9487 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9488}
9489
9490
9491/**
9492 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9493 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9494 */
9495HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9496{
9497 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9498
9499 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9500 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9501
9502 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9503
9504 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9505 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9506 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9507
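    /* Outside 64-bit mode only the lower 32 bits of the register operand specify the VMCS field encoding. */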
9508 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9509 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9510 u64VmcsField &= UINT64_C(0xffffffff);
9511
9512 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9513 {
9514 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9515 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9516
9517 VMXVEXITINFO ExitInfo;
9518 RT_ZERO(ExitInfo);
9519 ExitInfo.uReason = pVmxTransient->uExitReason;
9520 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9521 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9522 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9523 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9524 }
9525
9526 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9527 return vmxHCExitVmread(pVCpu, pVmxTransient);
9528 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9529}
9530
9531
9532/**
9533 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9534 */
9535HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9536{
9537 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9538
9539 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9540 {
9541 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9542 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9543 }
9544
9545 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9546}
9547
9548
9549/**
9550 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9551 * Conditional VM-exit.
9552 */
9553HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9554{
9555 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9556
9557 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9558 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9559
9560 VBOXSTRICTRC rcStrict;
9561 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9562 switch (uAccessType)
9563 {
9564 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9565 {
9566 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9567 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9568 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9569 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9570
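            /* Determine whether the nested hypervisor intercepts this MOV-to-CR; the check differs per control register (see the cases below). */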
9571 bool fIntercept;
9572 switch (iCrReg)
9573 {
9574 case 0:
9575 case 4:
9576 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9577 break;
9578
9579 case 3:
9580 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9581 break;
9582
9583 case 8:
9584 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9585 break;
9586
9587 default:
9588 fIntercept = false;
9589 break;
9590 }
9591 if (fIntercept)
9592 {
9593 VMXVEXITINFO ExitInfo;
9594 RT_ZERO(ExitInfo);
9595 ExitInfo.uReason = pVmxTransient->uExitReason;
9596 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9597 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9598 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9599 }
9600 else
9601 {
9602 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9603 AssertRCReturn(rc, rc);
9604 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9605 }
9606 break;
9607 }
9608
9609 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9610 {
9611 /*
9612             * CR0/CR4 reads do not cause VM-exits; the read-shadow is used (subject to masking).
9613 * CR2 reads do not cause a VM-exit.
9614 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9615 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9616 */
9617 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9618 if ( iCrReg == 3
9619 || iCrReg == 8)
9620 {
9621 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9622 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9623 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9624 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9625 {
9626 VMXVEXITINFO ExitInfo;
9627 RT_ZERO(ExitInfo);
9628 ExitInfo.uReason = pVmxTransient->uExitReason;
9629 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9630 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9631 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9632 }
9633 else
9634 {
9635 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9636 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9637 }
9638 }
9639 else
9640 {
9641 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9642 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9643 }
9644 break;
9645 }
9646
9647 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9648 {
9649 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9650 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9651 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
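            /* CLTS causes a nested VM-exit only when CR0.TS is owned by the nested hypervisor (guest/host mask) and set in the read shadow. */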
9652 if ( (uGstHostMask & X86_CR0_TS)
9653 && (uReadShadow & X86_CR0_TS))
9654 {
9655 VMXVEXITINFO ExitInfo;
9656 RT_ZERO(ExitInfo);
9657 ExitInfo.uReason = pVmxTransient->uExitReason;
9658 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9659 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9660 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9661 }
9662 else
9663 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9664 break;
9665 }
9666
9667 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9668 {
9669 RTGCPTR GCPtrEffDst;
9670 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9671 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9672 if (fMemOperand)
9673 {
9674 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9675 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9676 }
9677 else
9678 GCPtrEffDst = NIL_RTGCPTR;
9679
9680 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9681 {
9682 VMXVEXITINFO ExitInfo;
9683 RT_ZERO(ExitInfo);
9684 ExitInfo.uReason = pVmxTransient->uExitReason;
9685 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9686 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9687 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9688 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9689 }
9690 else
9691 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9692 break;
9693 }
9694
9695 default:
9696 {
9697 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9698 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9699 }
9700 }
9701
9702 if (rcStrict == VINF_IEM_RAISED_XCPT)
9703 {
9704 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9705 rcStrict = VINF_SUCCESS;
9706 }
9707 return rcStrict;
9708}
9709
9710
9711/**
9712 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9713 * Conditional VM-exit.
9714 */
9715HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9716{
9717 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9718
9719 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9720 {
9721 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9722 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9723
9724 VMXVEXITINFO ExitInfo;
9725 RT_ZERO(ExitInfo);
9726 ExitInfo.uReason = pVmxTransient->uExitReason;
9727 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9728 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9729 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9730 }
9731 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9732}
9733
9734
9735/**
9736 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9737 * Conditional VM-exit.
9738 */
9739HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9740{
9741 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9742
9743 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9744
9745 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9746 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9747 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9748
9749 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9750 uint8_t const cbAccess = s_aIOSizes[uIOSize];
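    /* Check the nested-guest I/O intercepts (unconditional I/O exiting or the I/O bitmaps) for this port and access size. */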
9751 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9752 {
9753 /*
9754 * IN/OUT instruction:
9755 * - Provides VM-exit instruction length.
9756 *
9757 * INS/OUTS instruction:
9758 * - Provides VM-exit instruction length.
9759 * - Provides Guest-linear address.
9760 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9761 */
9762 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9763 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9764
9765        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9766 pVmxTransient->ExitInstrInfo.u = 0;
9767 pVmxTransient->uGuestLinearAddr = 0;
9768
9769 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9770 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9771 if (fIOString)
9772 {
9773 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9774 if (fVmxInsOutsInfo)
9775 {
9776 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9777 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9778 }
9779 }
9780
9781 VMXVEXITINFO ExitInfo;
9782 RT_ZERO(ExitInfo);
9783 ExitInfo.uReason = pVmxTransient->uExitReason;
9784 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9785 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9786 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9787 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9788 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9789 }
9790 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9791}
9792
9793
9794/**
9795 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9796 */
9797HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9798{
9799 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9800
9801 uint32_t fMsrpm;
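    /* Without MSR bitmaps every RDMSR exits to the nested hypervisor; otherwise consult the nested-guest MSR bitmap for the MSR in ECX. */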
9802 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9803 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9804 else
9805 fMsrpm = VMXMSRPM_EXIT_RD;
9806
9807 if (fMsrpm & VMXMSRPM_EXIT_RD)
9808 {
9809 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9810 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9811 }
9812 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9813}
9814
9815
9816/**
9817 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9818 */
9819HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9820{
9821 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9822
9823 uint32_t fMsrpm;
9824 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9825 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9826 else
9827 fMsrpm = VMXMSRPM_EXIT_WR;
9828
9829 if (fMsrpm & VMXMSRPM_EXIT_WR)
9830 {
9831 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9832 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9833 }
9834 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9835}
9836
9837
9838/**
9839 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9840 */
9841HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9842{
9843 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9844
9845 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9846 {
9847 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9848 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9849 }
9850 return vmxHCExitMwait(pVCpu, pVmxTransient);
9851}
9852
9853
9854/**
9855 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9856 * VM-exit.
9857 */
9858HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9859{
9860 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9861
9862 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9863 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9864 VMXVEXITINFO ExitInfo;
9865 RT_ZERO(ExitInfo);
9866 ExitInfo.uReason = pVmxTransient->uExitReason;
9867 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9868 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9869}
9870
9871
9872/**
9873 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9874 */
9875HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9876{
9877 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9878
9879 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9880 {
9881 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9882 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9883 }
9884 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9885}
9886
9887
9888/**
9889 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9890 */
9891HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9892{
9893 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9894
9895 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9896 * PAUSE when executing a nested-guest? If it does not, we would not need
9897 * to check for the intercepts here. Just call VM-exit... */
9898
9899 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9900 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9901 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9902 {
9903 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9904 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9905 }
9906 return vmxHCExitPause(pVCpu, pVmxTransient);
9907}
9908
9909
9910/**
9911 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9912 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9913 */
9914HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9915{
9916 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9917
9918 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9919 {
9920 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9921 VMXVEXITINFO ExitInfo;
9922 RT_ZERO(ExitInfo);
9923 ExitInfo.uReason = pVmxTransient->uExitReason;
9924 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9925 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9926 }
9927 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9928}
9929
9930
9931/**
9932 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9933 * VM-exit.
9934 */
9935HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9936{
9937 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9938
9939 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9940 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9941 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9942 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9943
9944 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9945
9946 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9947 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9948
9949 VMXVEXITINFO ExitInfo;
9950 RT_ZERO(ExitInfo);
9951 ExitInfo.uReason = pVmxTransient->uExitReason;
9952 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9953 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9954
9955 VMXVEXITEVENTINFO ExitEventInfo;
9956 RT_ZERO(ExitEventInfo);
9957 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9958 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9959 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
9960}
9961
9962
9963/**
9964 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
9965 * Conditional VM-exit.
9966 */
9967HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9968{
9969 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9970
9971 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
9972 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9973 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9974}
9975
9976
9977/**
9978 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
9979 * Conditional VM-exit.
9980 */
9981HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9982{
9983 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9984
9985 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
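    /* The exit qualification holds the vector for which the EOI was virtualized. */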
9986 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9987 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9988}
9989
9990
9991/**
9992 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
9993 */
9994HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9995{
9996 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9997
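    /* Per the Intel SDM, RDTSCP is intercepted via the same "RDTSC exiting" control
       as RDTSC; the assertion below checks that the nested hypervisor also set
       "enable RDTSCP", without which the nested guest would have taken a #UD
       instead of this exit. */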
9998 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9999 {
10000 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10001 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10002 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10003 }
10004 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10005}
10006
10007
10008/**
10009 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10010 */
10011HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10012{
10013 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10014
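    /* "WBINVD exiting" is an optional secondary processor-based control; reflect the
       exit only if the nested hypervisor enabled it, else use the outer handler. */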
10015 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10016 {
10017 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10018 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10019 }
10020 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10021}
10022
10023
10024/**
10025 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10026 */
10027HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10028{
10029 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10030
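    /* Per the Intel SDM, INVPCID intercepts are governed by the primary "INVLPG
       exiting" control; the "enable INVPCID" secondary control must also be set,
       otherwise the nested guest would have taken a #UD rather than this VM-exit. */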
10031 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10032 {
10033 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10034 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10035 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10036 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10037
10038 VMXVEXITINFO ExitInfo;
10039 RT_ZERO(ExitInfo);
10040 ExitInfo.uReason = pVmxTransient->uExitReason;
10041 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10042 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10043 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10044 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10045 }
10046 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10047}
10048
10049
10050/**
10051 * Nested-guest VM-exit handler for invalid guest state
10052 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10053 */
10054HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10055{
10056 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10057
10058 /*
10059 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10060     * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10061     * Handle it as if the outer guest itself were in an invalid guest state.
10062 *
10063 * When the fast path is implemented, this should be changed to cause the corresponding
10064 * nested-guest VM-exit.
10065 */
10066 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10067}
10068
10069
10070/**
10071 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10072 * and only provide the instruction length.
10073 *
10074 * Unconditional VM-exit.
10075 */
10076HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10077{
10078 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10079
10080#ifdef VBOX_STRICT
10081 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10082 switch (pVmxTransient->uExitReason)
10083 {
10084 case VMX_EXIT_ENCLS:
10085 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10086 break;
10087
10088 case VMX_EXIT_VMFUNC:
10089 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10090 break;
10091 }
10092#endif
10093
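    /* These exits are unconditional for the nested guest and report nothing beyond
       the instruction length, so that is all that needs to be read before the exit
       is reflected via IEM. */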
10094 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10095 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10096}
10097
10098
10099/**
10100 * Nested-guest VM-exit handler for instructions that provide the instruction length
10101 * as well as additional information (exit qualification and instruction information).
10102 *
10103 * Unconditional VM-exit.
10104 */
10105HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10106{
10107 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10108
10109#ifdef VBOX_STRICT
10110 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10111 switch (pVmxTransient->uExitReason)
10112 {
10113 case VMX_EXIT_GDTR_IDTR_ACCESS:
10114 case VMX_EXIT_LDTR_TR_ACCESS:
10115 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10116 break;
10117
10118 case VMX_EXIT_RDRAND:
10119 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10120 break;
10121
10122 case VMX_EXIT_RDSEED:
10123 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10124 break;
10125
10126 case VMX_EXIT_XSAVES:
10127 case VMX_EXIT_XRSTORS:
10128 /** @todo NSTVMX: Verify XSS-bitmap. */
10129 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10130 break;
10131
10132 case VMX_EXIT_UMWAIT:
10133 case VMX_EXIT_TPAUSE:
10134 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10135 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10136 break;
10137
10138 case VMX_EXIT_LOADIWKEY:
10139 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10140 break;
10141 }
10142#endif
10143
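    /* Besides the instruction length, these exits report operand details (segment,
       base/index registers, displacement, etc.) in the VM-exit instruction-information
       field, so it is read together with the exit qualification before the exit is
       handed to IEM. */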
10144 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10145 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10146 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10147
10148 VMXVEXITINFO ExitInfo;
10149 RT_ZERO(ExitInfo);
10150 ExitInfo.uReason = pVmxTransient->uExitReason;
10151 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10152 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10153 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10154 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10155}
10156
10157/** @} */
10158#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10159