VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 95446

Last change on this file was revision 95446, checked in by vboxsync, 2 years ago:

VMM/HMVMX: Must have VMEXIT for X86_CR4_OSXSAVE changes (at least for non-darwin) as we need to load XCR0 when it is enabled. [build fix]

Line 
1/* $Id: VMXAllTemplate.cpp.h 95446 2022-06-30 00:54:01Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
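/*
 * Note: the includer (ring-0 HM or the NEM darwin backend) is expected to map these
 * accessors onto its own VMCS read/write helpers before pulling in this template.
 * A minimal, purely illustrative sketch follows; the real macro bodies and helper
 * names (myVmcsWrite32Example/myVmcsRead32Example here) are placeholders.
 */
#if 0
# define VMX_VMCS_WRITE_32(a_pVCpu, a_FieldEnc, a_Val)  myVmcsWrite32Example((a_pVCpu), (a_FieldEnc), (a_Val))
# define VMX_VMCS_READ_32(a_pVCpu, a_FieldEnc, a_pVal)  myVmcsRead32Example((a_pVCpu), (a_FieldEnc), (a_pVal))
#endif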
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/** Assert that all the given fields have been read from the VMCS. */
42#ifdef VBOX_STRICT
43# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
44 do { \
45 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
46 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
47 } while (0)
48#else
49# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
50#endif
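/*
 * Illustrative use only (not compiled): an exit handler reads the fields it needs via the
 * vmxHCReadXxxVmcs helpers below and can then assert the corresponding HMVMX_READ_XXX bits.
 */
#if 0
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
    uint64_t const uExitQual = pVmxTransient->uExitQual;
#endif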
51
52/**
53 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
54 * guest using hardware-assisted VMX.
55 *
56 * This excludes state like GPRs (other than RSP) which are always
57 * swapped and restored across the world-switch, and also registers like the
58 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
59 */
60#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
61 | CPUMCTX_EXTRN_RFLAGS \
62 | CPUMCTX_EXTRN_RSP \
63 | CPUMCTX_EXTRN_SREG_MASK \
64 | CPUMCTX_EXTRN_TABLE_MASK \
65 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
66 | CPUMCTX_EXTRN_SYSCALL_MSRS \
67 | CPUMCTX_EXTRN_SYSENTER_MSRS \
68 | CPUMCTX_EXTRN_TSC_AUX \
69 | CPUMCTX_EXTRN_OTHER_MSRS \
70 | CPUMCTX_EXTRN_CR0 \
71 | CPUMCTX_EXTRN_CR3 \
72 | CPUMCTX_EXTRN_CR4 \
73 | CPUMCTX_EXTRN_DR7 \
74 | CPUMCTX_EXTRN_HWVIRT \
75 | CPUMCTX_EXTRN_INHIBIT_INT \
76 | CPUMCTX_EXTRN_INHIBIT_NMI)
77
78/**
79 * Exception bitmap mask for real-mode guests (real-on-v86).
80 *
81 * We need to intercept all exceptions manually, except that:
82 * - \#AC and \#DB are omitted here because they are always intercepted anyway,
83 * to prevent the CPU from deadlocking due to bugs in Intel CPUs.
84 * - \#PF need not be intercepted even in real-mode if we have nested paging
85 * support.
86 */
87#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
88 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
89 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
90 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
91 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
92 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
93 | RT_BIT(X86_XCPT_XF))
94
95/** Maximum VM-instruction error number. */
96#define HMVMX_INSTR_ERROR_MAX 28
97
98/** Profiling macro. */
99#ifdef HM_PROFILE_EXIT_DISPATCH
100# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
101# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
102#else
103# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
104# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
105#endif
106
107#ifndef IN_NEM_DARWIN
108/** Assert that preemption is disabled or covered by thread-context hooks. */
109# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
110 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
111
112/** Assert that we haven't migrated CPUs when thread-context hooks are not
113 * used. */
114# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
115 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
116 ("Illegal migration! Entered on CPU %u Current %u\n", \
117 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
118#else
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
120# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
121#endif
122
123/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
124 * context. */
125#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
126 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
127 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
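/*
 * Illustrative use only (not compiled): assert that CR0 has been imported from the VMCS
 * (i.e. is not marked external in fExtrn) before reading it from the guest-CPU context.
 */
#if 0
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
#endif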
128
129/** Log the VM-exit reason with an easily visible marker to identify it in a
130 * potential sea of logging data. */
131#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
132 do { \
133 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
134 HMGetVmxExitName(a_uExitReason))); \
135 } while (0)
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
141/**
142 * Memory operand read or write access.
143 */
144typedef enum VMXMEMACCESS
145{
146 VMXMEMACCESS_READ = 0,
147 VMXMEMACCESS_WRITE = 1
148} VMXMEMACCESS;
149
150
151/**
152 * VMX VM-exit handler.
153 *
154 * @returns Strict VBox status code (i.e. informational status codes too).
155 * @param pVCpu The cross context virtual CPU structure.
156 * @param pVmxTransient The VMX-transient structure.
157 */
158#ifndef HMVMX_USE_FUNCTION_TABLE
159typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
160#else
161typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
162/** Pointer to VM-exit handler. */
163typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
164#endif
165
166/**
167 * VMX VM-exit handler, non-strict status code.
168 *
169 * This is generally the same as FNVMXEXITHANDLER; the NSRC suffix is just FYI.
170 *
171 * @returns VBox status code, no informational status code returned.
172 * @param pVCpu The cross context virtual CPU structure.
173 * @param pVmxTransient The VMX-transient structure.
174 *
175 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
176 * use of that status code will be replaced with VINF_EM_SOMETHING
177 * later when switching over to IEM.
178 */
179#ifndef HMVMX_USE_FUNCTION_TABLE
180typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
181#else
182typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
183#endif
184
185
186/*********************************************************************************************************************************
187* Internal Functions *
188*********************************************************************************************************************************/
189#ifndef HMVMX_USE_FUNCTION_TABLE
190DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
192# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
193#else
194# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
195# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
196#endif
197#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
198DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
199#endif
200
201static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
202
203/** @name VM-exit handler prototypes.
204 * @{
205 */
206static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
207static FNVMXEXITHANDLER vmxHCExitExtInt;
208static FNVMXEXITHANDLER vmxHCExitTripleFault;
209static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
210static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
211static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
212static FNVMXEXITHANDLER vmxHCExitCpuid;
213static FNVMXEXITHANDLER vmxHCExitGetsec;
214static FNVMXEXITHANDLER vmxHCExitHlt;
215static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
216static FNVMXEXITHANDLER vmxHCExitInvlpg;
217static FNVMXEXITHANDLER vmxHCExitRdpmc;
218static FNVMXEXITHANDLER vmxHCExitVmcall;
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220static FNVMXEXITHANDLER vmxHCExitVmclear;
221static FNVMXEXITHANDLER vmxHCExitVmlaunch;
222static FNVMXEXITHANDLER vmxHCExitVmptrld;
223static FNVMXEXITHANDLER vmxHCExitVmptrst;
224static FNVMXEXITHANDLER vmxHCExitVmread;
225static FNVMXEXITHANDLER vmxHCExitVmresume;
226static FNVMXEXITHANDLER vmxHCExitVmwrite;
227static FNVMXEXITHANDLER vmxHCExitVmxoff;
228static FNVMXEXITHANDLER vmxHCExitVmxon;
229static FNVMXEXITHANDLER vmxHCExitInvvpid;
230# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
231static FNVMXEXITHANDLER vmxHCExitInvept;
232# endif
233#endif
234static FNVMXEXITHANDLER vmxHCExitRdtsc;
235static FNVMXEXITHANDLER vmxHCExitMovCRx;
236static FNVMXEXITHANDLER vmxHCExitMovDRx;
237static FNVMXEXITHANDLER vmxHCExitIoInstr;
238static FNVMXEXITHANDLER vmxHCExitRdmsr;
239static FNVMXEXITHANDLER vmxHCExitWrmsr;
240static FNVMXEXITHANDLER vmxHCExitMwait;
241static FNVMXEXITHANDLER vmxHCExitMtf;
242static FNVMXEXITHANDLER vmxHCExitMonitor;
243static FNVMXEXITHANDLER vmxHCExitPause;
244static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
245static FNVMXEXITHANDLER vmxHCExitApicAccess;
246static FNVMXEXITHANDLER vmxHCExitEptViolation;
247static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
248static FNVMXEXITHANDLER vmxHCExitRdtscp;
249static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
250static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
251static FNVMXEXITHANDLER vmxHCExitXsetbv;
252static FNVMXEXITHANDLER vmxHCExitInvpcid;
253static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
254static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
255static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
256/** @} */
257
258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
259/** @name Nested-guest VM-exit handler prototypes.
260 * @{
261 */
262static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
263static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
264static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
265static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
266static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
267static FNVMXEXITHANDLER vmxHCExitHltNested;
268static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
269static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
270static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
271static FNVMXEXITHANDLER vmxHCExitRdtscNested;
272static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
273static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
274static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
275static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
276static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
277static FNVMXEXITHANDLER vmxHCExitMwaitNested;
278static FNVMXEXITHANDLER vmxHCExitMtfNested;
279static FNVMXEXITHANDLER vmxHCExitMonitorNested;
280static FNVMXEXITHANDLER vmxHCExitPauseNested;
281static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
282static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
283static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
284static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
287static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
288static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
289static FNVMXEXITHANDLER vmxHCExitInstrNested;
290static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
291# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
292static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
293static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
294# endif
295/** @} */
296#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
297
298
299/*********************************************************************************************************************************
300* Global Variables *
301*********************************************************************************************************************************/
302#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
303/**
304 * Array of all VMCS fields.
305 * Any fields added to the VT-x spec. should be added here.
306 *
307 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
308 * of nested-guests.
309 */
310static const uint32_t g_aVmcsFields[] =
311{
312 /* 16-bit control fields. */
313 VMX_VMCS16_VPID,
314 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
315 VMX_VMCS16_EPTP_INDEX,
316
317 /* 16-bit guest-state fields. */
318 VMX_VMCS16_GUEST_ES_SEL,
319 VMX_VMCS16_GUEST_CS_SEL,
320 VMX_VMCS16_GUEST_SS_SEL,
321 VMX_VMCS16_GUEST_DS_SEL,
322 VMX_VMCS16_GUEST_FS_SEL,
323 VMX_VMCS16_GUEST_GS_SEL,
324 VMX_VMCS16_GUEST_LDTR_SEL,
325 VMX_VMCS16_GUEST_TR_SEL,
326 VMX_VMCS16_GUEST_INTR_STATUS,
327 VMX_VMCS16_GUEST_PML_INDEX,
328
329 /* 16-bit host-state fields. */
330 VMX_VMCS16_HOST_ES_SEL,
331 VMX_VMCS16_HOST_CS_SEL,
332 VMX_VMCS16_HOST_SS_SEL,
333 VMX_VMCS16_HOST_DS_SEL,
334 VMX_VMCS16_HOST_FS_SEL,
335 VMX_VMCS16_HOST_GS_SEL,
336 VMX_VMCS16_HOST_TR_SEL,
337
338 /* 64-bit control fields. */
339 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
340 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
341 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
342 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
343 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
344 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
345 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
346 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
347 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
348 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
349 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
350 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
351 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
352 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
353 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
354 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
355 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
356 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
357 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
358 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
359 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
360 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
361 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
362 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
363 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
364 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
365 VMX_VMCS64_CTRL_EPTP_FULL,
366 VMX_VMCS64_CTRL_EPTP_HIGH,
367 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
368 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
369 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
370 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
371 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
372 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
373 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
374 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
375 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
376 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
377 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
378 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
379 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
380 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
381 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
382 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
383 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
384 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
385 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
386 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
387 VMX_VMCS64_CTRL_SPPTP_FULL,
388 VMX_VMCS64_CTRL_SPPTP_HIGH,
389 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
390 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
391 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
392 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
393 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
394 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
395
396 /* 64-bit read-only data fields. */
397 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
398 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
399
400 /* 64-bit guest-state fields. */
401 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
402 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
403 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
404 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
405 VMX_VMCS64_GUEST_PAT_FULL,
406 VMX_VMCS64_GUEST_PAT_HIGH,
407 VMX_VMCS64_GUEST_EFER_FULL,
408 VMX_VMCS64_GUEST_EFER_HIGH,
409 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
410 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
411 VMX_VMCS64_GUEST_PDPTE0_FULL,
412 VMX_VMCS64_GUEST_PDPTE0_HIGH,
413 VMX_VMCS64_GUEST_PDPTE1_FULL,
414 VMX_VMCS64_GUEST_PDPTE1_HIGH,
415 VMX_VMCS64_GUEST_PDPTE2_FULL,
416 VMX_VMCS64_GUEST_PDPTE2_HIGH,
417 VMX_VMCS64_GUEST_PDPTE3_FULL,
418 VMX_VMCS64_GUEST_PDPTE3_HIGH,
419 VMX_VMCS64_GUEST_BNDCFGS_FULL,
420 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
421 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
422 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
423 VMX_VMCS64_GUEST_PKRS_FULL,
424 VMX_VMCS64_GUEST_PKRS_HIGH,
425
426 /* 64-bit host-state fields. */
427 VMX_VMCS64_HOST_PAT_FULL,
428 VMX_VMCS64_HOST_PAT_HIGH,
429 VMX_VMCS64_HOST_EFER_FULL,
430 VMX_VMCS64_HOST_EFER_HIGH,
431 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
432 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
433 VMX_VMCS64_HOST_PKRS_FULL,
434 VMX_VMCS64_HOST_PKRS_HIGH,
435
436 /* 32-bit control fields. */
437 VMX_VMCS32_CTRL_PIN_EXEC,
438 VMX_VMCS32_CTRL_PROC_EXEC,
439 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
440 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
441 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
442 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
443 VMX_VMCS32_CTRL_EXIT,
444 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
445 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
446 VMX_VMCS32_CTRL_ENTRY,
447 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
448 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
449 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
450 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
451 VMX_VMCS32_CTRL_TPR_THRESHOLD,
452 VMX_VMCS32_CTRL_PROC_EXEC2,
453 VMX_VMCS32_CTRL_PLE_GAP,
454 VMX_VMCS32_CTRL_PLE_WINDOW,
455
456 /* 32-bit read-only fields. */
457 VMX_VMCS32_RO_VM_INSTR_ERROR,
458 VMX_VMCS32_RO_EXIT_REASON,
459 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
460 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
461 VMX_VMCS32_RO_IDT_VECTORING_INFO,
462 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
463 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
464 VMX_VMCS32_RO_EXIT_INSTR_INFO,
465
466 /* 32-bit guest-state fields. */
467 VMX_VMCS32_GUEST_ES_LIMIT,
468 VMX_VMCS32_GUEST_CS_LIMIT,
469 VMX_VMCS32_GUEST_SS_LIMIT,
470 VMX_VMCS32_GUEST_DS_LIMIT,
471 VMX_VMCS32_GUEST_FS_LIMIT,
472 VMX_VMCS32_GUEST_GS_LIMIT,
473 VMX_VMCS32_GUEST_LDTR_LIMIT,
474 VMX_VMCS32_GUEST_TR_LIMIT,
475 VMX_VMCS32_GUEST_GDTR_LIMIT,
476 VMX_VMCS32_GUEST_IDTR_LIMIT,
477 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
478 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
479 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
480 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
481 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
482 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
483 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
484 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
485 VMX_VMCS32_GUEST_INT_STATE,
486 VMX_VMCS32_GUEST_ACTIVITY_STATE,
487 VMX_VMCS32_GUEST_SMBASE,
488 VMX_VMCS32_GUEST_SYSENTER_CS,
489 VMX_VMCS32_PREEMPT_TIMER_VALUE,
490
491 /* 32-bit host-state fields. */
492 VMX_VMCS32_HOST_SYSENTER_CS,
493
494 /* Natural-width control fields. */
495 VMX_VMCS_CTRL_CR0_MASK,
496 VMX_VMCS_CTRL_CR4_MASK,
497 VMX_VMCS_CTRL_CR0_READ_SHADOW,
498 VMX_VMCS_CTRL_CR4_READ_SHADOW,
499 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
500 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
501 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
502 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
503
504 /* Natural-width read-only data fields. */
505 VMX_VMCS_RO_EXIT_QUALIFICATION,
506 VMX_VMCS_RO_IO_RCX,
507 VMX_VMCS_RO_IO_RSI,
508 VMX_VMCS_RO_IO_RDI,
509 VMX_VMCS_RO_IO_RIP,
510 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
511
512 /* Natural-width guest-state fields. */
513 VMX_VMCS_GUEST_CR0,
514 VMX_VMCS_GUEST_CR3,
515 VMX_VMCS_GUEST_CR4,
516 VMX_VMCS_GUEST_ES_BASE,
517 VMX_VMCS_GUEST_CS_BASE,
518 VMX_VMCS_GUEST_SS_BASE,
519 VMX_VMCS_GUEST_DS_BASE,
520 VMX_VMCS_GUEST_FS_BASE,
521 VMX_VMCS_GUEST_GS_BASE,
522 VMX_VMCS_GUEST_LDTR_BASE,
523 VMX_VMCS_GUEST_TR_BASE,
524 VMX_VMCS_GUEST_GDTR_BASE,
525 VMX_VMCS_GUEST_IDTR_BASE,
526 VMX_VMCS_GUEST_DR7,
527 VMX_VMCS_GUEST_RSP,
528 VMX_VMCS_GUEST_RIP,
529 VMX_VMCS_GUEST_RFLAGS,
530 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
531 VMX_VMCS_GUEST_SYSENTER_ESP,
532 VMX_VMCS_GUEST_SYSENTER_EIP,
533 VMX_VMCS_GUEST_S_CET,
534 VMX_VMCS_GUEST_SSP,
535 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
536
537 /* Natural-width host-state fields. */
538 VMX_VMCS_HOST_CR0,
539 VMX_VMCS_HOST_CR3,
540 VMX_VMCS_HOST_CR4,
541 VMX_VMCS_HOST_FS_BASE,
542 VMX_VMCS_HOST_GS_BASE,
543 VMX_VMCS_HOST_TR_BASE,
544 VMX_VMCS_HOST_GDTR_BASE,
545 VMX_VMCS_HOST_IDTR_BASE,
546 VMX_VMCS_HOST_SYSENTER_ESP,
547 VMX_VMCS_HOST_SYSENTER_EIP,
548 VMX_VMCS_HOST_RSP,
549 VMX_VMCS_HOST_RIP,
550 VMX_VMCS_HOST_S_CET,
551 VMX_VMCS_HOST_SSP,
552 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
553};
554#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
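/*
 * Illustrative sketch (not compiled): the array above is only consumed when setting up
 * hardware-assisted nested-guest execution, typically by iterating it to derive the
 * shadow VMCS field list and the VMREAD/VMWRITE bitmaps.
 */
#if 0
    for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFields); i++)
    {
        uint32_t const uVmcsField = g_aVmcsFields[i];
        /* ... skip fields not supported by the CPU, add the rest to the shadow field list ... */
        RT_NOREF(uVmcsField);
    }
#endif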
555
556#ifdef VBOX_STRICT
557static const uint32_t g_aVmcsSegBase[] =
558{
559 VMX_VMCS_GUEST_ES_BASE,
560 VMX_VMCS_GUEST_CS_BASE,
561 VMX_VMCS_GUEST_SS_BASE,
562 VMX_VMCS_GUEST_DS_BASE,
563 VMX_VMCS_GUEST_FS_BASE,
564 VMX_VMCS_GUEST_GS_BASE
565};
566static const uint32_t g_aVmcsSegSel[] =
567{
568 VMX_VMCS16_GUEST_ES_SEL,
569 VMX_VMCS16_GUEST_CS_SEL,
570 VMX_VMCS16_GUEST_SS_SEL,
571 VMX_VMCS16_GUEST_DS_SEL,
572 VMX_VMCS16_GUEST_FS_SEL,
573 VMX_VMCS16_GUEST_GS_SEL
574};
575static const uint32_t g_aVmcsSegLimit[] =
576{
577 VMX_VMCS32_GUEST_ES_LIMIT,
578 VMX_VMCS32_GUEST_CS_LIMIT,
579 VMX_VMCS32_GUEST_SS_LIMIT,
580 VMX_VMCS32_GUEST_DS_LIMIT,
581 VMX_VMCS32_GUEST_FS_LIMIT,
582 VMX_VMCS32_GUEST_GS_LIMIT
583};
584static const uint32_t g_aVmcsSegAttr[] =
585{
586 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
587 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
588 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
589 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
590 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
591 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
592};
593AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
594AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
595AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
596AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
597#endif /* VBOX_STRICT */
598
599#ifdef HMVMX_USE_FUNCTION_TABLE
600/**
601 * VMX_EXIT dispatch table.
602 */
603static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
604{
605 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
606 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
607 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
608 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
609 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
610 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
611 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
612 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
613 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
614 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
615 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
616 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
617 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
618 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
619 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
620 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
621 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
622 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
623 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
625 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
626 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
627 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
628 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
629 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
630 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
631 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
632 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
633 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
634#else
635 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
636 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
637 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
638 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
639 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
640 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
641 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
642 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
643 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
644#endif
645 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
646 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
647 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
648 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
649 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
650 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
651 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
652 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
653 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
654 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
655 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
656 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
657 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
658 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
659 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
660 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
661 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
662 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
663 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
664 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
665 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
666 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
667#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
668 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
669#else
670 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
671#endif
672 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
673 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
674#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
675 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
676#else
677 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
678#endif
679 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
680 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
681 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
682 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
683 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
684 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
685 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
686 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
687 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
688 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
689 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
690 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
691 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
692 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
693 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
694 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
695};
696#endif /* HMVMX_USE_FUNCTION_TABLE */
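/*
 * Illustrative dispatch sketch (not compiled): how the table above would typically be
 * indexed by the basic exit reason, falling back to the unexpected-exit handler for
 * out-of-range values. The real dispatcher adds statistics and additional checks.
 */
#if 0
    uint32_t const uExitReason = VMX_EXIT_REASON_BASIC(pVmxTransient->uExitReason);
    VBOXSTRICTRC rcStrict = uExitReason <= VMX_EXIT_MAX
                          ? g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient)
                          : vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
#endif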
697
698#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
699static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
700{
701 /* 0 */ "(Not Used)",
702 /* 1 */ "VMCALL executed in VMX root operation.",
703 /* 2 */ "VMCLEAR with invalid physical address.",
704 /* 3 */ "VMCLEAR with VMXON pointer.",
705 /* 4 */ "VMLAUNCH with non-clear VMCS.",
706 /* 5 */ "VMRESUME with non-launched VMCS.",
707 /* 6 */ "VMRESUME after VMXOFF.",
708 /* 7 */ "VM-entry with invalid control fields.",
709 /* 8 */ "VM-entry with invalid host state fields.",
710 /* 9 */ "VMPTRLD with invalid physical address.",
711 /* 10 */ "VMPTRLD with VMXON pointer.",
712 /* 11 */ "VMPTRLD with incorrect revision identifier.",
713 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
714 /* 13 */ "VMWRITE to read-only VMCS component.",
715 /* 14 */ "(Not Used)",
716 /* 15 */ "VMXON executed in VMX root operation.",
717 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
718 /* 17 */ "VM-entry with non-launched executing VMCS.",
719 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
720 /* 19 */ "VMCALL with non-clear VMCS.",
721 /* 20 */ "VMCALL with invalid VM-exit control fields.",
722 /* 21 */ "(Not Used)",
723 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
724 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
725 /* 24 */ "VMCALL with invalid SMM-monitor features.",
726 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
727 /* 26 */ "VM-entry with events blocked by MOV SS.",
728 /* 27 */ "(Not Used)",
729 /* 28 */ "Invalid operand to INVEPT/INVVPID."
730};
731#endif /* VBOX_STRICT && LOG_ENABLED */
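/*
 * Illustrative sketch (not compiled): mapping the VM-instruction error field to one of the
 * diagnostic strings above, with a bounds check against HMVMX_INSTR_ERROR_MAX.
 */
#if 0
    uint32_t uInstrError = 0;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
    AssertRC(rc);
    Log4(("VM-instruction error %u: %s\n", uInstrError,
          uInstrError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uInstrError] : "Unknown"));
#endif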
732
733
734/**
735 * Gets the CR0 guest/host mask.
736 *
737 * These bits typically do not change through the lifetime of a VM. Any bit set in
738 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
739 * by the guest.
740 *
741 * @returns The CR0 guest/host mask.
742 * @param pVCpu The cross context virtual CPU structure.
743 */
744static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
745{
746 /*
747 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET,
748 * NW) and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
749 *
750 * Furthermore, modifications to any bits that are reserved/unspecified currently
751 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
752 * when future CPUs specify and use currently reserved/unspecified bits.
753 */
754 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
755 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
756 * and @bugref{6944}. */
757 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
758 return ( X86_CR0_PE
759 | X86_CR0_NE
760 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
761 | X86_CR0_PG
762 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
763}
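/*
 * Illustrative sketch (not compiled, and assuming the natural-width VMX_VMCS_WRITE_NW
 * accessor is provided by the includer): the mask is written to the CR0 guest/host mask
 * VMCS field, so any guest change to a masked bit causes a VM-exit while unmasked bits
 * are handled entirely in hardware.
 */
#if 0
    uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
    AssertRC(rc);
#endif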
764
765
766/**
767 * Gets the CR4 guest/host mask.
768 *
769 * These bits typically do not change through the lifetime of a VM. Any bit set in
770 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
771 * by the guest.
772 *
773 * @returns The CR4 guest/host mask.
774 * @param pVCpu The cross context virtual CPU structure.
775 */
776static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
777{
778 /*
779 * We construct a mask of all CR4 bits that the guest can modify without causing
780 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
781 * a VM-exit when the guest attempts to modify them when executing using
782 * hardware-assisted VMX.
783 *
784 * When a feature is not exposed to the guest (and may be present on the host),
785 * we want to intercept guest modifications to the bit so we can emulate proper
786 * behavior (e.g., #GP).
787 *
788 * Furthermore, only modifications to those bits that don't require immediate
789 * emulation are allowed. For example, PCIDE is excluded because the behavior
790 * depends on CR3 which might not always be the guest value while executing
791 * using hardware-assisted VMX.
792 */
793 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
794 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
795#ifdef IN_NEM_DARWIN
796 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
797#endif
798 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
799
800 /*
801 * Paranoia.
802 * Ensure features exposed to the guest are present on the host.
803 */
804 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
805#ifdef IN_NEM_DARWIN
806 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
807#endif
808 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
809
810 uint64_t const fGstMask = X86_CR4_PVI
811 | X86_CR4_TSD
812 | X86_CR4_DE
813 | X86_CR4_MCE
814 | X86_CR4_PCE
815 | X86_CR4_OSXMMEEXCPT
816 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
817#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
818 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
819 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
820#endif
821 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
822 return ~fGstMask;
823}
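/*
 * Illustrative sketch (not compiled): with the mask returned above, a guest toggling a bit
 * in fGstMask (e.g. CR4.TSD) does not cause a VM-exit, while a host-owned bit such as
 * CR4.VMXE does.
 */
#if 0
    uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
    Assert(fCr4Mask & X86_CR4_VMXE);     /* Host-owned: guest writes are intercepted. */
    Assert(!(fCr4Mask & X86_CR4_TSD));   /* Guest-owned: no VM-exit on modification. */
#endif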
824
825
826/**
827 * Adds one or more exceptions to the exception bitmap and commits it to the current
828 * VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcptMask The exception(s) to add.
833 */
834static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
835{
836 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
837 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
838 if ((uXcptBitmap & uXcptMask) != uXcptMask)
839 {
840 uXcptBitmap |= uXcptMask;
841 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
842 AssertRC(rc);
843 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
844 }
845}
846
847
848/**
849 * Adds an exception to the exception bitmap and commits it to the current VMCS.
850 *
851 * @param pVCpu The cross context virtual CPU structure.
852 * @param pVmxTransient The VMX-transient structure.
853 * @param uXcpt The exception to add.
854 */
855static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
856{
857 Assert(uXcpt <= X86_XCPT_LAST);
858 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
859}
860
861
862/**
863 * Remove one or more exceptions from the exception bitmap and commits it to the
864 * current VMCS.
865 *
866 * This takes care of not removing the exception intercept if a nested-guest
867 * requires the exception to be intercepted.
868 *
869 * @returns VBox status code.
870 * @param pVCpu The cross context virtual CPU structure.
871 * @param pVmxTransient The VMX-transient structure.
872 * @param uXcptMask The exception(s) to remove.
873 */
874static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
875{
876 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
877 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
878 if (u32XcptBitmap & uXcptMask)
879 {
880#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
881 if (!pVmxTransient->fIsNestedGuest)
882 { /* likely */ }
883 else
884 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
885#endif
886#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
887 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
888 | RT_BIT(X86_XCPT_DE)
889 | RT_BIT(X86_XCPT_NM)
890 | RT_BIT(X86_XCPT_TS)
891 | RT_BIT(X86_XCPT_UD)
892 | RT_BIT(X86_XCPT_NP)
893 | RT_BIT(X86_XCPT_SS)
894 | RT_BIT(X86_XCPT_GP)
895 | RT_BIT(X86_XCPT_PF)
896 | RT_BIT(X86_XCPT_MF));
897#elif defined(HMVMX_ALWAYS_TRAP_PF)
898 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
899#endif
900 if (uXcptMask)
901 {
902 /* Validate we are not removing any essential exception intercepts. */
903#ifndef IN_NEM_DARWIN
904 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
905#else
906 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
907#endif
908 NOREF(pVCpu);
909 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
910 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
911
912 /* Remove it from the exception bitmap. */
913 u32XcptBitmap &= ~uXcptMask;
914
915 /* Commit and update the cache if necessary. */
916 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
917 {
918 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
919 AssertRC(rc);
920 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
921 }
922 }
923 }
924 return VINF_SUCCESS;
925}
926
927
928/**
929 * Removes an exception from the exception bitmap and commits it to the current
930 * VMCS.
931 *
932 * @returns VBox status code.
933 * @param pVCpu The cross context virtual CPU structure.
934 * @param pVmxTransient The VMX-transient structure.
935 * @param uXcpt The exception to remove.
936 */
937static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
938{
939 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
940}
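/*
 * Illustrative sketch (not compiled): temporarily intercepting a single exception and
 * removing the intercept again once it is no longer needed.
 */
#if 0
    vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    /* ... run the guest and handle the #GP VM-exits ... */
    int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    AssertRC(rc);
#endif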
941
942
943#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
944/**
945 * Loads the shadow VMCS specified by the VMCS info. object.
946 *
947 * @returns VBox status code.
948 * @param pVmcsInfo The VMCS info. object.
949 *
950 * @remarks Can be called with interrupts disabled.
951 */
952static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
953{
954 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
955 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
956
957 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
958 if (RT_SUCCESS(rc))
959 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
960 return rc;
961}
962
963
964/**
965 * Clears the shadow VMCS specified by the VMCS info. object.
966 *
967 * @returns VBox status code.
968 * @param pVmcsInfo The VMCS info. object.
969 *
970 * @remarks Can be called with interrupts disabled.
971 */
972static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
973{
974 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
975 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
976
977 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
978 if (RT_SUCCESS(rc))
979 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
980 return rc;
981}
982
983
984/**
985 * Switches from and to the specified VMCSes.
986 *
987 * @returns VBox status code.
988 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
989 * @param pVmcsInfoTo The VMCS info. object we are switching to.
990 *
991 * @remarks Called with interrupts disabled.
992 */
993static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
994{
995 /*
996 * Clear the VMCS we are switching out if it has not already been cleared.
997 * This will sync any CPU internal data back to the VMCS.
998 */
999 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1000 {
1001 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1002 if (RT_SUCCESS(rc))
1003 {
1004 /*
1005 * The shadow VMCS, if any, would not be active at this point since we
1006 * would have cleared it while importing the virtual hardware-virtualization
1007 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1008 * clear the shadow VMCS here, just assert for safety.
1009 */
1010 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1011 }
1012 else
1013 return rc;
1014 }
1015
1016 /*
1017 * Clear the VMCS we are switching to if it has not already been cleared.
1018 * This will initialize the VMCS launch state to "clear" required for loading it.
1019 *
1020 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1021 */
1022 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1023 {
1024 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1025 if (RT_SUCCESS(rc))
1026 { /* likely */ }
1027 else
1028 return rc;
1029 }
1030
1031 /*
1032 * Finally, load the VMCS we are switching to.
1033 */
1034 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1035}
1036
1037
1038/**
1039 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1040 * caller.
1041 *
1042 * @returns VBox status code.
1043 * @param pVCpu The cross context virtual CPU structure.
1044 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1045 * true) or guest VMCS (pass false).
1046 */
1047static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1048{
1049 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1050 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1051
1052 PVMXVMCSINFO pVmcsInfoFrom;
1053 PVMXVMCSINFO pVmcsInfoTo;
1054 if (fSwitchToNstGstVmcs)
1055 {
1056 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1057 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1058 }
1059 else
1060 {
1061 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1062 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1063 }
1064
1065 /*
1066 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1067 * preemption hook code path acquires the current VMCS.
1068 */
1069 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1070
1071 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1072 if (RT_SUCCESS(rc))
1073 {
1074 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1075 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1076
1077 /*
1078 * If we are switching to a VMCS that was executed on a different host CPU or was
1079 * never executed before, flag that we need to export the host state before executing
1080 * guest/nested-guest code using hardware-assisted VMX.
1081 *
1082 * This could probably be done in a preemptible context since the preemption hook
1083 * will flag the necessary change in host context. However, since preemption is
1084 * already disabled and to avoid making assumptions about host specific code in
1085 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1086 * disabled.
1087 */
1088 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1089 { /* likely */ }
1090 else
1091 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1092
1093 ASMSetFlags(fEFlags);
1094
1095 /*
1096 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1097 * flag that we need to update the host MSR values there. Even if we decide in the
1098 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1099 * if its content differs, we would have to update the host MSRs anyway.
1100 */
1101 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1102 }
1103 else
1104 ASMSetFlags(fEFlags);
1105 return rc;
1106}
1107#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1108
1109
1110#ifdef VBOX_STRICT
1111/**
1112 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1113 * transient structure.
1114 *
1115 * @param pVCpu The cross context virtual CPU structure.
1116 * @param pVmxTransient The VMX-transient structure.
1117 */
1118DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1119{
1120 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1121 AssertRC(rc);
1122}
1123
1124
1125/**
1126 * Reads the VM-entry exception error code field from the VMCS into
1127 * the VMX transient structure.
1128 *
1129 * @param pVCpu The cross context virtual CPU structure.
1130 * @param pVmxTransient The VMX-transient structure.
1131 */
1132DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1133{
1134 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1135 AssertRC(rc);
1136}
1137
1138
1139/**
1140 * Reads the VM-entry instruction length field from the VMCS into
1141 * the VMX transient structure.
1142 *
1143 * @param pVCpu The cross context virtual CPU structure.
1144 * @param pVmxTransient The VMX-transient structure.
1145 */
1146DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1147{
1148 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1149 AssertRC(rc);
1150}
1151#endif /* VBOX_STRICT */
1152
1153
1154/**
1155 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1156 * transient structure.
1157 *
1158 * @param pVCpu The cross context virtual CPU structure.
1159 * @param pVmxTransient The VMX-transient structure.
1160 */
1161DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1162{
1163 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1164 {
1165 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1166 AssertRC(rc);
1167 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1168 }
1169}
1170
1171
1172/**
1173 * Reads the VM-exit interruption error code from the VMCS into the VMX
1174 * transient structure.
1175 *
1176 * @param pVCpu The cross context virtual CPU structure.
1177 * @param pVmxTransient The VMX-transient structure.
1178 */
1179DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1180{
1181 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1182 {
1183 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1184 AssertRC(rc);
1185 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1186 }
1187}
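/*
 * Illustrative sketch (not compiled): callers batch the reads they need up front; thanks to
 * the fVmcsFieldsRead bookkeeping each field is VMREAD at most once per VM-exit, and strict
 * builds can verify the set afterwards with HMVMX_ASSERT_READ.
 */
#if 0
    vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_INTERRUPTION_INFO | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE);
#endif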
1188
1189
1190/**
1191 * Reads the VM-exit instruction length field from the VMCS into the VMX
1192 * transient structure.
1193 *
1194 * @param pVCpu The cross context virtual CPU structure.
1195 * @param pVmxTransient The VMX-transient structure.
1196 */
1197DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1198{
1199 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1200 {
1201 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1202 AssertRC(rc);
1203 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1204 }
1205}
1206
1207
1208/**
1209 * Reads the VM-exit instruction-information field from the VMCS into
1210 * the VMX transient structure.
1211 *
1212 * @param pVCpu The cross context virtual CPU structure.
1213 * @param pVmxTransient The VMX-transient structure.
1214 */
1215DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1216{
1217 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1218 {
1219 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1220 AssertRC(rc);
1221 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1222 }
1223}
1224
1225
1226/**
1227 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1228 *
1229 * @param pVCpu The cross context virtual CPU structure.
1230 * @param pVmxTransient The VMX-transient structure.
1231 */
1232DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1233{
1234 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1235 {
1236 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1237 AssertRC(rc);
1238 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1239 }
1240}
1241
1242
1243/**
1244 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1245 *
1246 * @param pVCpu The cross context virtual CPU structure.
1247 * @param pVmxTransient The VMX-transient structure.
1248 */
1249DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1250{
1251 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1252 {
1253 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1254 AssertRC(rc);
1255 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1256 }
1257}
1258
1259
1260/**
1261 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1262 *
1263 * @param pVCpu The cross context virtual CPU structure.
1264 * @param pVmxTransient The VMX-transient structure.
1265 */
1266DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1267{
1268 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1269 {
1270 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1271 AssertRC(rc);
1272 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1273 }
1274}
1275
1276#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1277/**
1278 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1279 * structure.
1280 *
1281 * @param pVCpu The cross context virtual CPU structure.
1282 * @param pVmxTransient The VMX-transient structure.
1283 */
1284DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1285{
1286 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1287 {
1288 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1289 AssertRC(rc);
1290 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1291 }
1292}
1293#endif
1294
1295/**
1296 * Reads the IDT-vectoring information field from the VMCS into the VMX
1297 * transient structure.
1298 *
1299 * @param pVCpu The cross context virtual CPU structure.
1300 * @param pVmxTransient The VMX-transient structure.
1301 *
1302 * @remarks No-long-jump zone!!!
1303 */
1304DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1305{
1306 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1307 {
1308 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1309 AssertRC(rc);
1310 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1311 }
1312}
1313
1314
1315/**
1316 * Reads the IDT-vectoring error code from the VMCS into the VMX
1317 * transient structure.
1318 *
1319 * @param pVCpu The cross context virtual CPU structure.
1320 * @param pVmxTransient The VMX-transient structure.
1321 */
1322DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1323{
1324 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1325 {
1326 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 AssertRC(rc);
1328 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1329 }
1330}
1331
1332#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1333/**
1334 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1335 *
1336 * @param pVCpu The cross context virtual CPU structure.
1337 * @param pVmxTransient The VMX-transient structure.
1338 */
1339static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1340{
1341 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1342 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1343 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1344 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1345 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1346 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1347 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1348 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1349 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1350 AssertRC(rc);
1351 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1352 | HMVMX_READ_EXIT_INSTR_LEN
1353 | HMVMX_READ_EXIT_INSTR_INFO
1354 | HMVMX_READ_IDT_VECTORING_INFO
1355 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1356 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1357 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1358 | HMVMX_READ_GUEST_LINEAR_ADDR
1359 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1360}
1361#endif
1362
1363/**
1364 * Verifies that our cached values of the VMCS fields are all consistent with
1365 * what's actually present in the VMCS.
1366 *
1367 * @returns VBox status code.
1368 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1369 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1370 * VMCS content. HMCPU error-field is
1371 * updated, see VMX_VCI_XXX.
1372 * @param pVCpu The cross context virtual CPU structure.
1373 * @param pVmcsInfo The VMCS info. object.
1374 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1375 */
1376static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1377{
1378 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1379
1380 uint32_t u32Val;
1381 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1382 AssertRC(rc);
1383 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1384 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1385 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1386 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1387
1388 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1389 AssertRC(rc);
1390 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1391 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1392 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1393 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1394
1395 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1396 AssertRC(rc);
1397 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1398 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1399 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1400 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1401
1402 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1403 AssertRC(rc);
1404 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1405 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1406 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1407 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1408
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1414 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 uint64_t u64Val;
1420 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1421 {
1422 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1423 AssertRC(rc);
1424 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1425 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1426 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1427 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1428 }
1429
1430 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1431 AssertRC(rc);
1432 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1433 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1434 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1435 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1436
1437 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1438 AssertRC(rc);
1439 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1440 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1441 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1442 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1443
1444 NOREF(pcszVmcs);
1445 return VINF_SUCCESS;
1446}
1447
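/*
 * Illustrative usage, not part of the upstream template: a strict-build caller
 * could sanity-check the cached controls right before VM-entry along these
 * lines, assuming a VMX-transient structure named pVmxTransient is in scope:
 *
 *     int rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmxTransient->pVmcsInfo,
 *                                       pVmxTransient->fIsNestedGuest);
 *     AssertRCReturn(rc, rc);
 *
 * On a mismatch the function records the offending VMX_VCI_XXX value in
 * u32HMError and returns VERR_VMX_VMCS_FIELD_CACHE_INVALID, as seen above.
 */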
1448
1449/**
1450 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1451 * VMCS.
1452 *
1453 * This is typically required when the guest changes paging mode.
1454 *
1455 * @returns VBox status code.
1456 * @param pVCpu The cross context virtual CPU structure.
1457 * @param pVmxTransient The VMX-transient structure.
1458 *
1459 * @remarks Requires EFER.
1460 * @remarks No-long-jump zone!!!
1461 */
1462static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1463{
1464 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1465 {
1466 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1467 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1468
1469 /*
1470 * VM-entry controls.
1471 */
1472 {
1473 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1474 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1475
1476 /*
1477 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1478 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1479 *
1480 * For nested-guests, this is a mandatory VM-entry control. It's also
1481 * required because we do not want to leak host bits to the nested-guest.
1482 */
1483 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1484
1485 /*
1486 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1487 *
1488             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1489             * required to get the nested-guest working with hardware-assisted VMX execution.
1490             * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1491             * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1492             * here rather than while merging the guest VMCS controls.
1493 */
1494 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1495 {
1496 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1497 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1498 }
1499 else
1500 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1501
1502 /*
1503 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1504 *
1505 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1506 * regardless of whether the nested-guest VMCS specifies it because we are free to
1507 * load whatever MSRs we require and we do not need to modify the guest visible copy
1508 * of the VM-entry MSR load area.
1509 */
1510 if ( g_fHmVmxSupportsVmcsEfer
1511#ifndef IN_NEM_DARWIN
1512 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1513#endif
1514 )
1515 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1516 else
1517 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1518
1519 /*
1520 * The following should -not- be set (since we're not in SMM mode):
1521 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1522 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1523 */
1524
1525 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1526 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1527
1528 if ((fVal & fZap) == fVal)
1529 { /* likely */ }
1530 else
1531 {
1532 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1533 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1534 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1535 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1536 }
1537
1538 /* Commit it to the VMCS. */
1539 if (pVmcsInfo->u32EntryCtls != fVal)
1540 {
1541 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1542 AssertRC(rc);
1543 pVmcsInfo->u32EntryCtls = fVal;
1544 }
1545 }
1546
1547 /*
1548 * VM-exit controls.
1549 */
1550 {
1551 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1552 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1553
1554 /*
1555 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1556 * supported the 1-setting of this bit.
1557 *
1558             * For nested-guests, we set the "save debug controls" control since the converse
1559             * "load debug controls" control is mandatory for nested-guests anyway.
1560 */
1561 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1562
1563 /*
1564 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1565 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1566 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1567 * vmxHCExportHostMsrs().
1568 *
1569 * For nested-guests, we always set this bit as we do not support 32-bit
1570 * hosts.
1571 */
1572 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1573
1574#ifndef IN_NEM_DARWIN
1575 /*
1576 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1577 *
1578 * For nested-guests, we should use the "save IA32_EFER" control if we also
1579 * used the "load IA32_EFER" control while exporting VM-entry controls.
1580 */
1581 if ( g_fHmVmxSupportsVmcsEfer
1582 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1583 {
1584 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1585 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1586 }
1587#endif
1588
1589 /*
1590 * Enable saving of the VMX-preemption timer value on VM-exit.
1591 * For nested-guests, currently not exposed/used.
1592 */
1593 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1594 * the timer value. */
1595 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1596 {
1597 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1598 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1599 }
1600
1601 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1602 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1603
1604 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1605 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1606 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1607
1608 if ((fVal & fZap) == fVal)
1609 { /* likely */ }
1610 else
1611 {
1612 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1613 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1614 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1615 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1616 }
1617
1618 /* Commit it to the VMCS. */
1619 if (pVmcsInfo->u32ExitCtls != fVal)
1620 {
1621 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1622 AssertRC(rc);
1623 pVmcsInfo->u32ExitCtls = fVal;
1624 }
1625 }
1626
1627 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1628 }
1629 return VINF_SUCCESS;
1630}
1631
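/*
 * Minimal sketch, not upstream code: the allowed-0/allowed-1 pattern used above
 * generalizes to any VMX control word.  A bit set in allowed0 must stay set, a
 * bit clear in allowed1 must stay clear, and an optional feature is only usable
 * when its bit is set in allowed1:
 *
 *     uint32_t       fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0;   (must-be-one bits)
 *     uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1;   (may-be-one bits)
 *     if (fZap & VMX_ENTRY_CTLS_LOAD_EFER_MSR)                     (feature available?)
 *         fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
 *     Assert((fVal & fZap) == fVal);                               (mirrors the check above)
 */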
1632
1633/**
1634 * Sets the TPR threshold in the VMCS.
1635 *
1636 * @param pVCpu The cross context virtual CPU structure.
1637 * @param pVmcsInfo The VMCS info. object.
1638 * @param u32TprThreshold The TPR threshold (task-priority class only).
1639 */
1640DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1641{
1642 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1643 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1644 RT_NOREF(pVmcsInfo);
1645 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1646 AssertRC(rc);
1647}
1648
1649
1650/**
1651 * Exports the guest APIC TPR state into the VMCS.
1652 *
1653 * @param pVCpu The cross context virtual CPU structure.
1654 * @param pVmxTransient The VMX-transient structure.
1655 *
1656 * @remarks No-long-jump zone!!!
1657 */
1658static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1659{
1660 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1661 {
1662 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1663
1664 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1665 if (!pVmxTransient->fIsNestedGuest)
1666 {
1667 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1668 && APICIsEnabled(pVCpu))
1669 {
1670 /*
1671 * Setup TPR shadowing.
1672 */
1673 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1674 {
1675 bool fPendingIntr = false;
1676 uint8_t u8Tpr = 0;
1677 uint8_t u8PendingIntr = 0;
1678 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1679 AssertRC(rc);
1680
1681 /*
1682 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1683 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1684 * priority of the pending interrupt so we can deliver the interrupt. If there
1685 * are no interrupts pending, set threshold to 0 to not cause any
1686 * TPR-below-threshold VM-exits.
1687 */
1688 uint32_t u32TprThreshold = 0;
1689 if (fPendingIntr)
1690 {
1691 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1692 (which is the Task-Priority Class). */
1693 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1694 const uint8_t u8TprPriority = u8Tpr >> 4;
1695 if (u8PendingPriority <= u8TprPriority)
1696 u32TprThreshold = u8PendingPriority;
1697 }
1698
1699 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1700 }
1701 }
1702 }
1703 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1704 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1705 }
1706}
1707
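/*
 * Worked example, illustrative only: with a pending interrupt vector of 0x52 and
 * a guest TPR of 0x70, the priority classes are 0x5 and 0x7.  The pending class
 * (5) is <= the TPR class (7), so the interrupt is currently masked and the TPR
 * threshold is set to 5; once the guest lowers its TPR below 0x50 the CPU raises
 * a TPR-below-threshold VM-exit and the interrupt can be delivered.
 */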
1708
1709/**
1710 * Gets the guest interruptibility-state and updates related force-flags.
1711 *
1712 * @returns Guest's interruptibility-state.
1713 * @param pVCpu The cross context virtual CPU structure.
1714 *
1715 * @remarks No-long-jump zone!!!
1716 */
1717static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1718{
1719 /*
1720 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1721 */
1722 uint32_t fIntrState = 0;
1723 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1724 {
1725 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1726 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1727
1728 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1729 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1730 {
1731 if (pCtx->eflags.Bits.u1IF)
1732 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1733 else
1734 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1735 }
1736 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1737 {
1738 /*
1739 * We can clear the inhibit force flag as even if we go back to the recompiler
1740 * without executing guest code in VT-x, the flag's condition to be cleared is
1741 * met and thus the cleared state is correct.
1742 */
1743 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1744 }
1745 }
1746
1747 /*
1748 * Check if we should inhibit NMI delivery.
1749 */
1750 if (CPUMIsGuestNmiBlocking(pVCpu))
1751 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1752
1753 /*
1754 * Validate.
1755 */
1756#ifdef VBOX_STRICT
1757 /* We don't support block-by-SMI yet.*/
1758 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1759
1760 /* Block-by-STI must not be set when interrupts are disabled. */
1761 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1762 {
1763 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1764 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1765 }
1766#endif
1767
1768 return fIntrState;
1769}
1770
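/*
 * Illustrative usage, not upstream code: the returned value is what gets written
 * to the guest interruptibility-state field before VM-entry, roughly:
 *
 *     uint32_t const fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
 *     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
 *     AssertRC(rc);
 *
 * The VMX_VMCS32_GUEST_INT_STATE field constant is assumed here; the actual
 * caller and field name live elsewhere in this template.
 */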
1771
1772/**
1773 * Exports the exception intercepts required for guest execution in the VMCS.
1774 *
1775 * @param pVCpu The cross context virtual CPU structure.
1776 * @param pVmxTransient The VMX-transient structure.
1777 *
1778 * @remarks No-long-jump zone!!!
1779 */
1780static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1781{
1782 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1783 {
1784 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1785 if ( !pVmxTransient->fIsNestedGuest
1786 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1787 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1788 else
1789 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1790
1791 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1792 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1793 }
1794}
1795
1796
1797/**
1798 * Exports the guest's RIP into the guest-state area in the VMCS.
1799 *
1800 * @param pVCpu The cross context virtual CPU structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1809
1810 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1811 AssertRC(rc);
1812
1813 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1814 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1815 }
1816}
1817
1818
1819/**
1820 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1821 *
1822 * @param pVCpu The cross context virtual CPU structure.
1823 * @param pVmxTransient The VMX-transient structure.
1824 *
1825 * @remarks No-long-jump zone!!!
1826 */
1827static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1828{
1829 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1830 {
1831 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1832
1833 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1834 Let us assert it as such and use 32-bit VMWRITE. */
1835 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1836 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1837 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1838 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1839
1840#ifndef IN_NEM_DARWIN
1841 /*
1842 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1843 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1844 * can run the real-mode guest code under Virtual 8086 mode.
1845 */
1846 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1847 if (pVmcsInfo->RealMode.fRealOnV86Active)
1848 {
1849 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1850 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1851 Assert(!pVmxTransient->fIsNestedGuest);
1852 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1853 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1854 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1855 }
1856#else
1857 RT_NOREF(pVmxTransient);
1858#endif
1859
1860 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1861 AssertRC(rc);
1862
1863 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1864 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1865 }
1866}
1867
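/*
 * Worked example, illustrative only: for a real-mode guest under the real-on-v86
 * hack with eflags 0x00000202 (IF set), the value committed to the VMCS becomes
 * 0x00020202: the original 0x202 is stashed in RealMode.Eflags, VM (bit 17) is
 * set and IOPL (bits 13:12) is forced to 0.
 */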
1868
1869#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1870/**
1871 * Copies the nested-guest VMCS to the shadow VMCS.
1872 *
1873 * @returns VBox status code.
1874 * @param pVCpu The cross context virtual CPU structure.
1875 * @param pVmcsInfo The VMCS info. object.
1876 *
1877 * @remarks No-long-jump zone!!!
1878 */
1879static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1880{
1881 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1882 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1883
1884 /*
1885 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1886 * current VMCS, as we may try saving guest lazy MSRs.
1887 *
1888     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1889     * calling the VMCS import code, which is currently performing the guest MSR reads
1890     * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts,
1891     * nor the rest of the VMX leave-session machinery.
1892 */
1893 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1894
1895 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1896 if (RT_SUCCESS(rc))
1897 {
1898 /*
1899 * Copy all guest read/write VMCS fields.
1900 *
1901 * We don't check for VMWRITE failures here for performance reasons and
1902 * because they are not expected to fail, barring irrecoverable conditions
1903 * like hardware errors.
1904 */
1905 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1906 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1907 {
1908 uint64_t u64Val;
1909 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1910 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1911 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1912 }
1913
1914 /*
1915 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1916 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1917 */
1918 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1919 {
1920 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1921 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1922 {
1923 uint64_t u64Val;
1924 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1925 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1926 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1927 }
1928 }
1929
1930 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1931 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1932 }
1933
1934 ASMSetFlags(fEFlags);
1935 return rc;
1936}
1937
1938
1939/**
1940 * Copies the shadow VMCS to the nested-guest VMCS.
1941 *
1942 * @returns VBox status code.
1943 * @param pVCpu The cross context virtual CPU structure.
1944 * @param pVmcsInfo The VMCS info. object.
1945 *
1946 * @remarks Called with interrupts disabled.
1947 */
1948static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1949{
1950 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1951 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1952 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1953
1954 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1955 if (RT_SUCCESS(rc))
1956 {
1957 /*
1958 * Copy guest read/write fields from the shadow VMCS.
1959 * Guest read-only fields cannot be modified, so no need to copy them.
1960 *
1961 * We don't check for VMREAD failures here for performance reasons and
1962 * because they are not expected to fail, barring irrecoverable conditions
1963 * like hardware errors.
1964 */
1965 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1966 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1967 {
1968 uint64_t u64Val;
1969 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1970 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1971 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1972 }
1973
1974 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1975 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1976 }
1977 return rc;
1978}
1979
1980
1981/**
1982 * Enables VMCS shadowing for the given VMCS info. object.
1983 *
1984 * @param pVCpu The cross context virtual CPU structure.
1985 * @param pVmcsInfo The VMCS info. object.
1986 *
1987 * @remarks No-long-jump zone!!!
1988 */
1989static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1990{
1991 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1992 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1993 {
1994 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1995 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1996 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1997 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1998 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1999 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2000 Log4Func(("Enabled\n"));
2001 }
2002}
2003
2004
2005/**
2006 * Disables VMCS shadowing for the given VMCS info. object.
2007 *
2008 * @param pVCpu The cross context virtual CPU structure.
2009 * @param pVmcsInfo The VMCS info. object.
2010 *
2011 * @remarks No-long-jump zone!!!
2012 */
2013static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2014{
2015 /*
2016 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2017 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2018 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2019 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2020 *
2021 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2022 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2023 */
2024 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2025 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2026 {
2027 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2028 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2029 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2030 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2031 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2032 Log4Func(("Disabled\n"));
2033 }
2034}
2035#endif
2036
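/*
 * Illustrative sketch, not upstream code: when the nested-guest VMCS content has
 * changed, a caller would typically refresh the shadow VMCS and make sure
 * shadowing is enabled, e.g.:
 *
 *     int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
 *     AssertRCReturn(rc, rc);
 *     vmxHCEnableVmcsShadowing(pVCpu, pVmcsInfo);
 *
 * vmxHCDisableVmcsShadowing() undoes this by clearing both the secondary control
 * bit and the VMCS link pointer, keeping the VM-entry checks consistent as the
 * comment above explains.
 */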
2037
2038/**
2039 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2040 *
2041 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2042 * sharing FPU-related CR0 bits between the guest and host.
2043 *
2044 * @returns VBox status code.
2045 * @param pVCpu The cross context virtual CPU structure.
2046 * @param pVmxTransient The VMX-transient structure.
2047 *
2048 * @remarks No-long-jump zone!!!
2049 */
2050static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2051{
2052 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2053 {
2054 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2055 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2056
2057 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2058 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2059 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2060 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2061 else
2062 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2063
2064 if (!pVmxTransient->fIsNestedGuest)
2065 {
2066 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2067 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2068 uint64_t const u64ShadowCr0 = u64GuestCr0;
2069 Assert(!RT_HI_U32(u64GuestCr0));
2070
2071 /*
2072 * Setup VT-x's view of the guest CR0.
2073 */
2074 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2075 if (VM_IS_VMX_NESTED_PAGING(pVM))
2076 {
2077#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2078 if (CPUMIsGuestPagingEnabled(pVCpu))
2079 {
2080 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2081 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2082 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2083 }
2084 else
2085 {
2086 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2087 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2088 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2089 }
2090
2091 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2092 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2093 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2094#endif
2095 }
2096 else
2097 {
2098 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2099 u64GuestCr0 |= X86_CR0_WP;
2100 }
2101
2102 /*
2103 * Guest FPU bits.
2104 *
2105 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2106 * using CR0.TS.
2107 *
2108             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2109             * set on the first CPUs to support VT-x, but makes no mention of it with regards to UX (unrestricted guest) in the VM-entry checks.
2110 */
2111 u64GuestCr0 |= X86_CR0_NE;
2112
2113 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2114 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2115
2116 /*
2117 * Update exception intercepts.
2118 */
2119 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2120#ifndef IN_NEM_DARWIN
2121 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2122 {
2123 Assert(PDMVmmDevHeapIsEnabled(pVM));
2124 Assert(pVM->hm.s.vmx.pRealModeTSS);
2125 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2126 }
2127 else
2128#endif
2129 {
2130 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2131 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2132 if (fInterceptMF)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2134 }
2135
2136 /* Additional intercepts for debugging, define these yourself explicitly. */
2137#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2138 uXcptBitmap |= 0
2139 | RT_BIT(X86_XCPT_BP)
2140 | RT_BIT(X86_XCPT_DE)
2141 | RT_BIT(X86_XCPT_NM)
2142 | RT_BIT(X86_XCPT_TS)
2143 | RT_BIT(X86_XCPT_UD)
2144 | RT_BIT(X86_XCPT_NP)
2145 | RT_BIT(X86_XCPT_SS)
2146 | RT_BIT(X86_XCPT_GP)
2147 | RT_BIT(X86_XCPT_PF)
2148 | RT_BIT(X86_XCPT_MF)
2149 ;
2150#elif defined(HMVMX_ALWAYS_TRAP_PF)
2151 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2152#endif
2153 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2154 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2155 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2156 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2157 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2158
2159 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2160 u64GuestCr0 |= fSetCr0;
2161 u64GuestCr0 &= fZapCr0;
2162 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2163
2164 /* Commit the CR0 and related fields to the guest VMCS. */
2165 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2166 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2167 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2168 {
2169 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2170 AssertRC(rc);
2171 }
2172 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2173 {
2174 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2175 AssertRC(rc);
2176 }
2177
2178 /* Update our caches. */
2179 pVmcsInfo->u32ProcCtls = uProcCtls;
2180 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2181
2182 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2183 }
2184 else
2185 {
2186 /*
2187 * With nested-guests, we may have extended the guest/host mask here since we
2188 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2189 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2190 * originally supplied. We must copy those bits from the nested-guest CR0 into
2191 * the nested-guest CR0 read-shadow.
2192 */
2193 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2194 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2195 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2196 Assert(!RT_HI_U32(u64GuestCr0));
2197 Assert(u64GuestCr0 & X86_CR0_NE);
2198
2199 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2200 u64GuestCr0 |= fSetCr0;
2201 u64GuestCr0 &= fZapCr0;
2202 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2203
2204 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2205 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2206 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2207
2208 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2209 }
2210
2211 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2212 }
2213
2214 return VINF_SUCCESS;
2215}
2216
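/*
 * Worked example, illustrative only, using typical fixed-bit MSR values of
 * CR0 fixed0 = 0x80000021 (PG, NE, PE) and fixed1 = 0xffffffff: a guest CR0 of
 * 0x00000011 is adjusted as
 *
 *     0x00000011 | 0x80000021 = 0x80000031        (apply must-be-one bits)
 *     0x80000031 & 0xffffffff = 0x80000031        (apply must-be-zero bits)
 *     0x80000031 & ~(CD | NW) = 0x80000031        (force caching enabled)
 *
 * With unrestricted guest execution, PE and PG are first removed from the
 * must-be-one set, as done at the top of the function.
 */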
2217
2218/**
2219 * Exports the guest control registers (CR3, CR4) into the guest-state area
2220 * in the VMCS.
2221 *
2222 * @returns VBox strict status code.
2223 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2224 * without unrestricted guest access and the VMMDev is not presently
2225 * mapped (e.g. EFI32).
2226 *
2227 * @param pVCpu The cross context virtual CPU structure.
2228 * @param pVmxTransient The VMX-transient structure.
2229 *
2230 * @remarks No-long-jump zone!!!
2231 */
2232static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2233{
2234 int rc = VINF_SUCCESS;
2235 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2236
2237 /*
2238 * Guest CR2.
2239 * It's always loaded in the assembler code. Nothing to do here.
2240 */
2241
2242 /*
2243 * Guest CR3.
2244 */
2245 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2246 {
2247 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2248
2249 if (VM_IS_VMX_NESTED_PAGING(pVM))
2250 {
2251#ifndef IN_NEM_DARWIN
2252 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2253 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2254
2255 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2256 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2257 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2258 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2259
2260 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2261 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2262 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2263
2264 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2265 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2266 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2267 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2268 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2269 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2270 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2271
2272 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2273 AssertRC(rc);
2274#endif
2275
2276 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2277 uint64_t u64GuestCr3 = pCtx->cr3;
2278 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2279 || CPUMIsGuestPagingEnabledEx(pCtx))
2280 {
2281 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2282 if (CPUMIsGuestInPAEModeEx(pCtx))
2283 {
2284 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2285 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2286 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2287 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2288 }
2289
2290 /*
2291                 * With nested paging, the guest's view of its CR3 is left unblemished when the
2292                 * guest is using paging, or when we have unrestricted guest execution to handle
2293                 * the guest while it's not using paging.
2294 */
2295 }
2296#ifndef IN_NEM_DARWIN
2297 else
2298 {
2299 /*
2300 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2301 * thinks it accesses physical memory directly, we use our identity-mapped
2302 * page table to map guest-linear to guest-physical addresses. EPT takes care
2303 * of translating it to host-physical addresses.
2304 */
2305 RTGCPHYS GCPhys;
2306 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2307
2308 /* We obtain it here every time as the guest could have relocated this PCI region. */
2309 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2310 if (RT_SUCCESS(rc))
2311 { /* likely */ }
2312 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2313 {
2314 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2315 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2316 }
2317 else
2318 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2319
2320 u64GuestCr3 = GCPhys;
2321 }
2322#endif
2323
2324 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2325 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2326 AssertRC(rc);
2327 }
2328 else
2329 {
2330 Assert(!pVmxTransient->fIsNestedGuest);
2331 /* Non-nested paging case, just use the hypervisor's CR3. */
2332 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2333
2334 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2335 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2336 AssertRC(rc);
2337 }
2338
2339 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2340 }
2341
2342 /*
2343 * Guest CR4.
2344     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2345 */
2346 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2347 {
2348 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2349 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2350
2351 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2352 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2353
2354 /*
2355 * With nested-guests, we may have extended the guest/host mask here (since we
2356 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2357 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2358 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2359 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2360 */
2361 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2362 uint64_t u64GuestCr4 = pCtx->cr4;
2363 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2364 ? pCtx->cr4
2365 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2366 Assert(!RT_HI_U32(u64GuestCr4));
2367
2368#ifndef IN_NEM_DARWIN
2369 /*
2370 * Setup VT-x's view of the guest CR4.
2371 *
2372 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2373 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2374         * redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
2375 *
2376 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2377 */
2378 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2379 {
2380 Assert(pVM->hm.s.vmx.pRealModeTSS);
2381 Assert(PDMVmmDevHeapIsEnabled(pVM));
2382 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2383 }
2384#endif
2385
2386 if (VM_IS_VMX_NESTED_PAGING(pVM))
2387 {
2388 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2389 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2390 {
2391 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2392 u64GuestCr4 |= X86_CR4_PSE;
2393 /* Our identity mapping is a 32-bit page directory. */
2394 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2395 }
2396 /* else use guest CR4.*/
2397 }
2398 else
2399 {
2400 Assert(!pVmxTransient->fIsNestedGuest);
2401
2402 /*
2403 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2404 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2405 */
2406 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2407 {
2408 case PGMMODE_REAL: /* Real-mode. */
2409 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2410 case PGMMODE_32_BIT: /* 32-bit paging. */
2411 {
2412 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2413 break;
2414 }
2415
2416 case PGMMODE_PAE: /* PAE paging. */
2417 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2418 {
2419 u64GuestCr4 |= X86_CR4_PAE;
2420 break;
2421 }
2422
2423 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2424 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2425 {
2426#ifdef VBOX_WITH_64_BITS_GUESTS
2427 /* For our assumption in vmxHCShouldSwapEferMsr. */
2428 Assert(u64GuestCr4 & X86_CR4_PAE);
2429 break;
2430#endif
2431 }
2432 default:
2433 AssertFailed();
2434 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2435 }
2436 }
2437
2438 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2439 u64GuestCr4 |= fSetCr4;
2440 u64GuestCr4 &= fZapCr4;
2441
2442 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2443 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2444 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2445
2446#ifndef IN_NEM_DARWIN
2447 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2448 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2449 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2450 {
2451 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2452 hmR0VmxUpdateStartVmFunction(pVCpu);
2453 }
2454#endif
2455
2456 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2457
2458 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2459 }
2460 return rc;
2461}
2462
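/*
 * Illustrative sketch, not upstream code: the EPTP constructed above combines
 * the 4K-aligned host-physical address of the EPT root with the memory type and
 * page-walk length fields, e.g. for a (hypothetical) root at 0x123456000:
 *
 *     EPTP = 0x123456000
 *          | RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
 *          | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
 *
 * which encodes the write-back memory type in bits 2:0 and a page-walk length of
 * 4 (stored as 3) in bits 5:3, matching the assertions above.
 */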
2463
2464#ifdef VBOX_STRICT
2465/**
2466 * Strict function to validate segment registers.
2467 *
2468 * @param pVCpu The cross context virtual CPU structure.
2469 * @param pVmcsInfo The VMCS info. object.
2470 *
2471 * @remarks Will import guest CR0 on strict builds during validation of
2472 * segments.
2473 */
2474static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2475{
2476 /*
2477 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2478 *
2479 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2480 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2481 * unusable bit and doesn't change the guest-context value.
2482 */
2483 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2484 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2485 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2486 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2487 && ( !CPUMIsGuestInRealModeEx(pCtx)
2488 && !CPUMIsGuestInV86ModeEx(pCtx)))
2489 {
2490 /* Protected mode checks */
2491 /* CS */
2492 Assert(pCtx->cs.Attr.n.u1Present);
2493 Assert(!(pCtx->cs.Attr.u & 0xf00));
2494 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2495 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2496 || !(pCtx->cs.Attr.n.u1Granularity));
2497 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2498 || (pCtx->cs.Attr.n.u1Granularity));
2499 /* CS cannot be loaded with NULL in protected mode. */
2500 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2501 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2502 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2503 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2504 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2505 else
2506            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2507 /* SS */
2508 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2509 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2510 if ( !(pCtx->cr0 & X86_CR0_PE)
2511 || pCtx->cs.Attr.n.u4Type == 3)
2512 {
2513 Assert(!pCtx->ss.Attr.n.u2Dpl);
2514 }
2515 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2516 {
2517 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2518 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2519 Assert(pCtx->ss.Attr.n.u1Present);
2520 Assert(!(pCtx->ss.Attr.u & 0xf00));
2521 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2522 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2523 || !(pCtx->ss.Attr.n.u1Granularity));
2524 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2525 || (pCtx->ss.Attr.n.u1Granularity));
2526 }
2527 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2528 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->ds.Attr.n.u1Present);
2532 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->ds.Attr.u & 0xf00));
2534 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->ds.Attr.n.u1Granularity));
2537 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2538 || (pCtx->ds.Attr.n.u1Granularity));
2539 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->es.Attr.n.u1Present);
2546 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->es.Attr.u & 0xf00));
2548 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->es.Attr.n.u1Granularity));
2551 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2552 || (pCtx->es.Attr.n.u1Granularity));
2553 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->fs.Attr.n.u1Present);
2560 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->fs.Attr.u & 0xf00));
2562 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->fs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2566 || (pCtx->fs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2571 {
2572 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2573 Assert(pCtx->gs.Attr.n.u1Present);
2574 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2575 Assert(!(pCtx->gs.Attr.u & 0xf00));
2576 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2577 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2578 || !(pCtx->gs.Attr.n.u1Granularity));
2579 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2580 || (pCtx->gs.Attr.n.u1Granularity));
2581 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2582 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2583 }
2584 /* 64-bit capable CPUs. */
2585 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2586 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2587 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2588 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2589 }
2590 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2591 || ( CPUMIsGuestInRealModeEx(pCtx)
2592 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2593 {
2594 /* Real and v86 mode checks. */
2595 /* vmxHCExportGuestSegReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
2596 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2597#ifndef IN_NEM_DARWIN
2598 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2599 {
2600 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2601 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2602 }
2603 else
2604#endif
2605 {
2606 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2607 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2608 }
2609
2610 /* CS */
2611 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2612 Assert(pCtx->cs.u32Limit == 0xffff);
2613 Assert(u32CSAttr == 0xf3);
2614 /* SS */
2615 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2616 Assert(pCtx->ss.u32Limit == 0xffff);
2617 Assert(u32SSAttr == 0xf3);
2618 /* DS */
2619 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2620 Assert(pCtx->ds.u32Limit == 0xffff);
2621 Assert(u32DSAttr == 0xf3);
2622 /* ES */
2623 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2624 Assert(pCtx->es.u32Limit == 0xffff);
2625 Assert(u32ESAttr == 0xf3);
2626 /* FS */
2627 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2628 Assert(pCtx->fs.u32Limit == 0xffff);
2629 Assert(u32FSAttr == 0xf3);
2630 /* GS */
2631 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2632 Assert(pCtx->gs.u32Limit == 0xffff);
2633 Assert(u32GSAttr == 0xf3);
2634 /* 64-bit capable CPUs. */
2635 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2636 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2637 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2638 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2639 }
2640}
2641#endif /* VBOX_STRICT */
2642
2643
2644/**
2645 * Exports a guest segment register into the guest-state area in the VMCS.
2646 *
2647 * @returns VBox status code.
2648 * @param pVCpu The cross context virtual CPU structure.
2649 * @param pVmcsInfo The VMCS info. object.
2650 * @param iSegReg The segment register number (X86_SREG_XXX).
2651 * @param pSelReg Pointer to the segment selector.
2652 *
2653 * @remarks No-long-jump zone!!!
2654 */
2655static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2656{
2657 Assert(iSegReg < X86_SREG_COUNT);
2658
2659 uint32_t u32Access = pSelReg->Attr.u;
2660#ifndef IN_NEM_DARWIN
2661 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2662#endif
2663 {
2664 /*
2665 * The way to differentiate between whether this is really a null selector or was just
2666 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2667 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2668         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
2669         * NULL selectors loaded in protected-mode have their attributes set to 0.
2670 */
2671 if (u32Access)
2672 { }
2673 else
2674 u32Access = X86DESCATTR_UNUSABLE;
2675 }
2676#ifndef IN_NEM_DARWIN
2677 else
2678 {
2679 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2680 u32Access = 0xf3;
2681 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2682 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2683 RT_NOREF_PV(pVCpu);
2684 }
2685#else
2686 RT_NOREF(pVmcsInfo);
2687#endif
2688
2689 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2690 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2691              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2692
2693 /*
2694 * Commit it to the VMCS.
2695 */
2696 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2697 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2698 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2699 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2700 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2701 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2702 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2703 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2704 return VINF_SUCCESS;
2705}
2706
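/*
 * Illustrative example, not upstream code: a selector that is truly null in
 * protected mode has an all-zero attribute value, so the function writes
 * X86DESCATTR_UNUSABLE as its access rights, e.g. for a hypothetical zeroed
 * CPUMSELREG:
 *
 *     CPUMSELREG NullSel;
 *     RT_ZERO(NullSel);
 *     vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &NullSel);
 *
 * A selector of 0 loaded in real mode, by contrast, keeps its non-zero
 * attributes and stays usable, which is the distinction the comment in the
 * function makes.
 */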
2707
2708/**
2709 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2710 * area in the VMCS.
2711 *
2712 * @returns VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure.
2714 * @param pVmxTransient The VMX-transient structure.
2715 *
2716 * @remarks Will import guest CR0 on strict builds during validation of
2717 * segments.
2718 * @remarks No-long-jump zone!!!
2719 */
2720static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2721{
2722 int rc = VERR_INTERNAL_ERROR_5;
2723#ifndef IN_NEM_DARWIN
2724 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2725#endif
2726 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2727 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2728#ifndef IN_NEM_DARWIN
2729 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2730#endif
2731
2732 /*
2733 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2734 */
2735 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2736 {
2737 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2738 {
2739 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2740#ifndef IN_NEM_DARWIN
2741 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2742 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2743#endif
2744 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2745 AssertRC(rc);
2746 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2747 }
2748
2749 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2750 {
2751 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2752#ifndef IN_NEM_DARWIN
2753 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2754 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2755#endif
2756 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2757 AssertRC(rc);
2758 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2759 }
2760
2761 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2762 {
2763 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2764#ifndef IN_NEM_DARWIN
2765 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2766 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2767#endif
2768 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2769 AssertRC(rc);
2770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2771 }
2772
2773 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2774 {
2775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2776#ifndef IN_NEM_DARWIN
2777 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2778 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2779#endif
2780 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2781 AssertRC(rc);
2782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2783 }
2784
2785 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2786 {
2787 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2788#ifndef IN_NEM_DARWIN
2789 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2790 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2791#endif
2792 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2793 AssertRC(rc);
2794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2795 }
2796
2797 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2798 {
2799 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2800#ifndef IN_NEM_DARWIN
2801 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2802 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2803#endif
2804 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2805 AssertRC(rc);
2806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2807 }
2808
2809#ifdef VBOX_STRICT
2810 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2811#endif
2812 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2813 pCtx->cs.Attr.u));
2814 }
2815
2816 /*
2817 * Guest TR.
2818 */
2819 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2820 {
2821 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2822
2823 /*
2824 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2825 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2826         * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() for how pRealModeTSS is set up.
2827 */
2828 uint16_t u16Sel;
2829 uint32_t u32Limit;
2830 uint64_t u64Base;
2831 uint32_t u32AccessRights;
2832#ifndef IN_NEM_DARWIN
2833 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2834#endif
2835 {
2836 u16Sel = pCtx->tr.Sel;
2837 u32Limit = pCtx->tr.u32Limit;
2838 u64Base = pCtx->tr.u64Base;
2839 u32AccessRights = pCtx->tr.Attr.u;
2840 }
2841#ifndef IN_NEM_DARWIN
2842 else
2843 {
2844 Assert(!pVmxTransient->fIsNestedGuest);
2845 Assert(pVM->hm.s.vmx.pRealModeTSS);
2846 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2847
2848 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2849 RTGCPHYS GCPhys;
2850 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2851 AssertRCReturn(rc, rc);
2852
2853 X86DESCATTR DescAttr;
2854 DescAttr.u = 0;
2855 DescAttr.n.u1Present = 1;
2856 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2857
2858 u16Sel = 0;
2859 u32Limit = HM_VTX_TSS_SIZE;
2860 u64Base = GCPhys;
2861 u32AccessRights = DescAttr.u;
2862 }
2863#endif
2864
2865 /* Validate. */
2866 Assert(!(u16Sel & RT_BIT(2)));
2867 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2868 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2869 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2870 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2871 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2872 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2873 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2874 Assert( (u32Limit & 0xfff) == 0xfff
2875 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2876 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2877 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2878
2879 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2880 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2881 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2882 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2883
2884 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2885 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2886 }
2887
2888 /*
2889 * Guest GDTR.
2890 */
2891 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2892 {
2893 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2894
2895 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2896 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2897
2898 /* Validate. */
2899 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2900
2901 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2902 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2903 }
2904
2905 /*
2906 * Guest LDTR.
2907 */
2908 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2909 {
2910 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2911
2912 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2913 uint32_t u32Access;
2914 if ( !pVmxTransient->fIsNestedGuest
2915 && !pCtx->ldtr.Attr.u)
2916 u32Access = X86DESCATTR_UNUSABLE;
2917 else
2918 u32Access = pCtx->ldtr.Attr.u;
2919
2920 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2921 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2922 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2923 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2924
2925 /* Validate. */
2926 if (!(u32Access & X86DESCATTR_UNUSABLE))
2927 {
2928 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2929 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2930 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2931 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2932 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2933 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2934 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2935 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2936 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2937 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2938 }
2939
2940 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2941 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2942 }
2943
2944 /*
2945 * Guest IDTR.
2946 */
2947 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2948 {
2949 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2950
2951 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2952 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2953
2954 /* Validate. */
2955 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2956
2957 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2958 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2959 }
2960
2961 return VINF_SUCCESS;
2962}
2963
2964
2965/**
2966 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2967 * VM-exit interruption info type.
2968 *
2969 * @returns The IEM exception flags.
2970 * @param uVector The event vector.
2971 * @param uVmxEventType The VMX event type.
2972 *
2973 * @remarks This function currently only constructs flags required for
2974 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
2975 * and CR2 aspects of an exception are not included).
2976 */
2977static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2978{
2979 uint32_t fIemXcptFlags;
2980 switch (uVmxEventType)
2981 {
2982 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2983 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2985 break;
2986
2987 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2989 break;
2990
2991 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2992 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2993 break;
2994
2995 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2996 {
2997 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2998 if (uVector == X86_XCPT_BP)
2999 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3000 else if (uVector == X86_XCPT_OF)
3001 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3002 else
3003 {
3004 fIemXcptFlags = 0;
3005 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3006 }
3007 break;
3008 }
3009
3010 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3011 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3012 break;
3013
3014 default:
3015 fIemXcptFlags = 0;
3016 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3017 break;
3018 }
3019 return fIemXcptFlags;
3020}
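
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a VM-exit handler
 * that observed an event being delivered (valid IDT-vectoring info) would typically
 * derive the IEM flags for both the original and the new event with this helper and
 * then hand both flag words plus the vectors to IEMEvaluateRecursiveXcpt (signature
 * not shown here) to decide whether to raise a #DF, a triple fault, etc.
 *
 *   uint32_t const fIdtFlags  = vmxHCGetIemXcptFlags(VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectoringInfo),
 *                                                    VMX_IDT_VECTORING_INFO_TYPE(uIdtVectoringInfo));
 *   uint32_t const fExitFlags = vmxHCGetIemXcptFlags(VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
 *                                                    VMX_EXIT_INT_INFO_TYPE(uExitIntInfo));
 */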
3021
3022
3023/**
3024 * Sets an event as a pending event to be injected into the guest.
3025 *
3026 * @param pVCpu The cross context virtual CPU structure.
3027 * @param u32IntInfo The VM-entry interruption-information field.
3028 * @param cbInstr The VM-entry instruction length in bytes (for
3029 * software interrupts, exceptions and privileged
3030 * software exceptions).
3031 * @param u32ErrCode The VM-entry exception error code.
3032 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3033 * page-fault.
3034 */
3035DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3036 RTGCUINTPTR GCPtrFaultAddress)
3037{
3038 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3039 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3040 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3041 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3042 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3043 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3044}
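
/*
 * Illustrative sketch (hypothetical values, for clarity only): queueing a page-fault
 * (#PF) with an error code and fault address via vmxHCSetPendingEvent, following the
 * same pattern as the helpers below; uErrCode and GCPtrFault stand in for values
 * taken from the VM-exit.
 *
 *   uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *   vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0, uErrCode, GCPtrFault);
 */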
3045
3046
3047/**
3048 * Sets an external interrupt as pending-for-injection into the VM.
3049 *
3050 * @param pVCpu The cross context virtual CPU structure.
3051 * @param u8Interrupt The external interrupt vector.
3052 */
3053DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3054{
3055 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3057 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3058 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3059 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3060}
3061
3062
3063/**
3064 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3065 *
3066 * @param pVCpu The cross context virtual CPU structure.
3067 */
3068DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3069{
3070 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3072 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3073 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3074 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3075}
3076
3077
3078/**
3079 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3080 *
3081 * @param pVCpu The cross context virtual CPU structure.
3082 */
3083DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3084{
3085 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3087 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3088 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3089 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3090}
3091
3092
3093/**
3094 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3095 *
3096 * @param pVCpu The cross context virtual CPU structure.
3097 */
3098DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3099{
3100 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3103 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3104 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3105}
3106
3107
3108/**
3109 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3110 *
3111 * @param pVCpu The cross context virtual CPU structure.
3112 */
3113DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3114{
3115 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3119 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3120}
3121
3122
3123#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3124/**
3125 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3126 *
3127 * @param pVCpu The cross context virtual CPU structure.
3128 * @param u32ErrCode The error code for the general-protection exception.
3129 */
3130DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3131{
3132 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3134 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3135 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3136 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3137}
3138
3139
3140/**
3141 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3142 *
3143 * @param pVCpu The cross context virtual CPU structure.
3144 * @param u32ErrCode The error code for the stack exception.
3145 */
3146DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3147{
3148 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3149 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3150 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3151 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3152 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3153}
3154#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3155
3156
3157/**
3158 * Fixes up attributes for the specified segment register.
3159 *
3160 * @param pVCpu The cross context virtual CPU structure.
3161 * @param pSelReg The segment register that needs fixing.
3162 * @param pszRegName The register name (for logging and assertions).
3163 */
3164static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3165{
3166 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3167
3168 /*
3169 * If VT-x marks the segment as unusable, most other bits remain undefined:
3170 * - For CS the L, D and G bits have meaning.
3171 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3172 * - For the remaining data segments no bits are defined.
3173 *
3174 * The present bit and the unusable bit have been observed to be set at the
3175 * same time (the selector was supposed to be invalid as we started executing
3176 * a V8086 interrupt in ring-0).
3177 *
3178 * What matters for the rest of the VBox code is that the P bit is
3179 * cleared. Some of the other VBox code recognizes the unusable bit, but
3180 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3181 * safe side here, we'll strip off P and other bits we don't care about. If
3182 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3183 *
3184 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3185 */
3186#ifdef VBOX_STRICT
3187 uint32_t const uAttr = pSelReg->Attr.u;
3188#endif
3189
3190 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3191 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3192 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3193
3194#ifdef VBOX_STRICT
3195# ifndef IN_NEM_DARWIN
3196 VMMRZCallRing3Disable(pVCpu);
3197# endif
3198 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3199# ifdef DEBUG_bird
3200 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3201 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3202 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3203# endif
3204# ifndef IN_NEM_DARWIN
3205 VMMRZCallRing3Enable(pVCpu);
3206# endif
3207 NOREF(uAttr);
3208#endif
3209 RT_NOREF2(pVCpu, pszRegName);
3210}
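
/*
 * Worked example (made-up attribute value, assuming the usual X86DESCATTR_* bit layout):
 * an unusable data segment arriving with Attr.u = 0x1c093 (unusable, G, D/B, P, S, type 3)
 * is reduced by the mask above to 0x1c013, i.e. only the P bit (0x80) gets stripped here;
 * the limit-high and AVL bits would likewise be dropped if they were set.
 */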
3211
3212
3213/**
3214 * Imports a guest segment register from the current VMCS into the guest-CPU
3215 * context.
3216 *
3217 * @param pVCpu The cross context virtual CPU structure.
3218 * @param iSegReg The segment register number (X86_SREG_XXX).
3219 *
3220 * @remarks Called with interrupts and/or preemption disabled.
3221 */
3222static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3223{
3224 Assert(iSegReg < X86_SREG_COUNT);
3225 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3226 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3227 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3228 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3229
3230 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3231
3232 uint16_t u16Sel;
3233 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3234 pSelReg->Sel = u16Sel;
3235 pSelReg->ValidSel = u16Sel;
3236
3237 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3238 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3239
3240 uint32_t u32Attr;
3241 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3242 pSelReg->Attr.u = u32Attr;
3243 if (u32Attr & X86DESCATTR_UNUSABLE)
3244 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
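        /* Note: the string literal above packs the six register names at 3-byte strides ("ES\0", "CS\0", ...),
           so '+ iSegReg * 3' selects the name matching X86_SREG_XXX for the log/assertion message. */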
3245
3246 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3247}
3248
3249
3250/**
3251 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3252 *
3253 * @param pVCpu The cross context virtual CPU structure.
3254 *
3255 * @remarks Called with interrupts and/or preemption disabled.
3256 */
3257static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3258{
3259 uint16_t u16Sel;
3260 uint64_t u64Base;
3261 uint32_t u32Limit, u32Attr;
3262 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3263 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3264 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3265 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3266
3267 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3268 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3269 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3270 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3271 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3272 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3273 if (u32Attr & X86DESCATTR_UNUSABLE)
3274 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3275}
3276
3277
3278/**
3279 * Imports the guest TR from the current VMCS into the guest-CPU context.
3280 *
3281 * @param pVCpu The cross context virtual CPU structure.
3282 *
3283 * @remarks Called with interrupts and/or preemption disabled.
3284 */
3285static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3286{
3287 uint16_t u16Sel;
3288 uint64_t u64Base;
3289 uint32_t u32Limit, u32Attr;
3290 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3291 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3292 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3293 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3294
3295 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3296 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3297 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3298 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3299 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3300 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3301 /* TR is the only selector that can never be unusable. */
3302 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3303}
3304
3305
3306/**
3307 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3308 *
3309 * @param pVCpu The cross context virtual CPU structure.
3310 *
3311 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3312 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3313 * instead!!!
3314 */
3315static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3316{
3317 uint64_t u64Val;
3318 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3319 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3320 {
3321 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3322 AssertRC(rc);
3323
3324 pCtx->rip = u64Val;
3325 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3326 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3327 }
3328}
3329
3330
3331/**
3332 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3333 *
3334 * @param pVCpu The cross context virtual CPU structure.
3335 * @param pVmcsInfo The VMCS info. object.
3336 *
3337 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3338 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3339 * instead!!!
3340 */
3341static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3342{
3343 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3344 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3345 {
3346 uint64_t u64Val;
3347 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3348 AssertRC(rc);
3349
3350 pCtx->rflags.u64 = u64Val;
3351#ifndef IN_NEM_DARWIN
3352 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3353 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3354 {
3355 pCtx->eflags.Bits.u1VM = 0;
3356 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3357 }
3358#else
3359 RT_NOREF(pVmcsInfo);
3360#endif
3361 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3362 }
3363}
3364
3365
3366/**
3367 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3368 * context.
3369 *
3370 * @param pVCpu The cross context virtual CPU structure.
3371 * @param pVmcsInfo The VMCS info. object.
3372 *
3373 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3374 * do not log!
3375 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3376 * instead!!!
3377 */
3378static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3379{
3380 uint32_t u32Val;
3381 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3382 if (!u32Val)
3383 {
3384 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3385 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3386 CPUMSetGuestNmiBlocking(pVCpu, false);
3387 }
3388 else
3389 {
3390 /*
3391 * We must import RIP here to set our EM interrupt-inhibited state.
3392 * We also import RFLAGS as our code that evaluates pending interrupts
3393 * before VM-entry requires it.
3394 */
3395 vmxHCImportGuestRip(pVCpu);
3396 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3397
3398 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3399 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3400 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3401 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3402
3403 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3404 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3405 }
3406}
3407
3408
3409/**
3410 * Worker for VMXR0ImportStateOnDemand.
3411 *
3412 * @returns VBox status code.
3413 * @param pVCpu The cross context virtual CPU structure.
3414 * @param pVmcsInfo The VMCS info. object.
3415 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3416 */
3417static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3418{
3419 int rc = VINF_SUCCESS;
3420 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3421 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3422 uint32_t u32Val;
3423
3424 /*
3425     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3426 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3427 * neither are other host platforms.
3428 *
3429 * Committing this temporarily as it prevents BSOD.
3430 *
3431 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3432 */
3433# ifdef RT_OS_WINDOWS
3434 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3435 return VERR_HM_IPE_1;
3436# endif
3437
3438 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3439
3440#ifndef IN_NEM_DARWIN
3441 /*
3442 * We disable interrupts to make the updating of the state and in particular
3443     * the fExtrn modification atomic wrt preemption hooks.
3444 */
3445 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3446#endif
3447
3448 fWhat &= pCtx->fExtrn;
3449 if (fWhat)
3450 {
3451 do
3452 {
3453 if (fWhat & CPUMCTX_EXTRN_RIP)
3454 vmxHCImportGuestRip(pVCpu);
3455
3456 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3457 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3458
3459 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3460 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3461
3462 if (fWhat & CPUMCTX_EXTRN_RSP)
3463 {
3464 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3465 AssertRC(rc);
3466 }
3467
3468 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3469 {
3470 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3471#ifndef IN_NEM_DARWIN
3472 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3473#else
3474 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3475#endif
3476 if (fWhat & CPUMCTX_EXTRN_CS)
3477 {
3478 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3479 vmxHCImportGuestRip(pVCpu);
3480 if (fRealOnV86Active)
3481 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3482 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3483 }
3484 if (fWhat & CPUMCTX_EXTRN_SS)
3485 {
3486 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3487 if (fRealOnV86Active)
3488 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3489 }
3490 if (fWhat & CPUMCTX_EXTRN_DS)
3491 {
3492 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3493 if (fRealOnV86Active)
3494 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3495 }
3496 if (fWhat & CPUMCTX_EXTRN_ES)
3497 {
3498 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3499 if (fRealOnV86Active)
3500 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3501 }
3502 if (fWhat & CPUMCTX_EXTRN_FS)
3503 {
3504 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3505 if (fRealOnV86Active)
3506 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_GS)
3509 {
3510 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3511 if (fRealOnV86Active)
3512 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3513 }
3514 }
3515
3516 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3517 {
3518 if (fWhat & CPUMCTX_EXTRN_LDTR)
3519 vmxHCImportGuestLdtr(pVCpu);
3520
3521 if (fWhat & CPUMCTX_EXTRN_GDTR)
3522 {
3523 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3524 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3525 pCtx->gdtr.cbGdt = u32Val;
3526 }
3527
3528 /* Guest IDTR. */
3529 if (fWhat & CPUMCTX_EXTRN_IDTR)
3530 {
3531 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3532 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3533 pCtx->idtr.cbIdt = u32Val;
3534 }
3535
3536 /* Guest TR. */
3537 if (fWhat & CPUMCTX_EXTRN_TR)
3538 {
3539#ifndef IN_NEM_DARWIN
3540 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3541 don't need to import that one. */
3542                   so we don't need to import that one. */
3543#endif
3544 vmxHCImportGuestTr(pVCpu);
3545 }
3546 }
3547
3548 if (fWhat & CPUMCTX_EXTRN_DR7)
3549 {
3550#ifndef IN_NEM_DARWIN
3551 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3552#endif
3553 {
3554 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3555 AssertRC(rc);
3556 }
3557 }
3558
3559 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3560 {
3561 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3562 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3563 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3564 pCtx->SysEnter.cs = u32Val;
3565 }
3566
3567#ifndef IN_NEM_DARWIN
3568 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3569 {
3570 if ( pVM->hmr0.s.fAllow64BitGuests
3571 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3572 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3573 }
3574
3575 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3576 {
3577 if ( pVM->hmr0.s.fAllow64BitGuests
3578 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3579 {
3580 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3581 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3582 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3583 }
3584 }
3585
3586 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3587 {
3588 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3589 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3590 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3591 Assert(pMsrs);
3592 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3593 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3594 for (uint32_t i = 0; i < cMsrs; i++)
3595 {
3596 uint32_t const idMsr = pMsrs[i].u32Msr;
3597 switch (idMsr)
3598 {
3599 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3600 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3601 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3602 default:
3603 {
3604 uint32_t idxLbrMsr;
3605 if (VM_IS_VMX_LBR(pVM))
3606 {
3607 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3608 {
3609 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3610 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3611 break;
3612 }
3613 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3614 {
3615                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3616 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3617 break;
3618 }
3619 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3620 {
3621 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3622 break;
3623 }
3624 /* Fallthru (no break) */
3625 }
3626 pCtx->fExtrn = 0;
3627                            VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3628 ASMSetFlags(fEFlags);
3629 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3630 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3631 }
3632 }
3633 }
3634 }
3635#endif
3636
3637 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3638 {
3639 if (fWhat & CPUMCTX_EXTRN_CR0)
3640 {
3641 uint64_t u64Cr0;
3642 uint64_t u64Shadow;
3643 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3644 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3645#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3646 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3647 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3648#else
3649 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3650 {
3651 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3652 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3653 }
3654 else
3655 {
3656 /*
3657 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3658 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3659 * re-construct CR0. See @bugref{9180#c95} for details.
3660 */
3661 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3662 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3663 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3664 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3665 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3666 }
3667#endif
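                /*
                 * Illustrative per-bit reading of the reconstruction above (informal, for clarity):
                 * a bit not intercepted by anyone (clear in the active guest/host mask) comes straight
                 * from the hardware VMCS CR0; a bit intercepted by the nested hypervisor comes from the
                 * nested-guest VMCS's guest CR0; a bit intercepted only by us comes from our CR0 read
                 * shadow. Without a nested guest, the last two cases collapse into "intercepted bits
                 * come from the read shadow". The CR4 handling below follows the same pattern.
                 */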
3668#ifndef IN_NEM_DARWIN
3669 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3670#endif
3671 CPUMSetGuestCR0(pVCpu, u64Cr0);
3672#ifndef IN_NEM_DARWIN
3673 VMMRZCallRing3Enable(pVCpu);
3674#endif
3675 }
3676
3677 if (fWhat & CPUMCTX_EXTRN_CR4)
3678 {
3679 uint64_t u64Cr4;
3680 uint64_t u64Shadow;
3681 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3682 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3683#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3684 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3685 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3686#else
3687 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3688 {
3689 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3690 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3691 }
3692 else
3693 {
3694 /*
3695 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3696 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3697 * re-construct CR4. See @bugref{9180#c95} for details.
3698 */
3699 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3700 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3701 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3702 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3703 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3704 }
3705#endif
3706 pCtx->cr4 = u64Cr4;
3707 }
3708
3709 if (fWhat & CPUMCTX_EXTRN_CR3)
3710 {
3711 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3712 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3713 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3714 && CPUMIsGuestPagingEnabledEx(pCtx)))
3715 {
3716 uint64_t u64Cr3;
3717 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3718 if (pCtx->cr3 != u64Cr3)
3719 {
3720 pCtx->cr3 = u64Cr3;
3721 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3722 }
3723
3724 /*
3725 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3726 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3727 */
3728 if (CPUMIsGuestInPAEModeEx(pCtx))
3729 {
3730 X86PDPE aPaePdpes[4];
3731 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3732 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3733 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3734 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3735 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3736 {
3737 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3738 /* PGM now updates PAE PDPTEs while updating CR3. */
3739 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3740 }
3741 }
3742 }
3743 }
3744 }
3745
3746#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3747 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3748 {
3749 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3750 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3751 {
3752 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3753 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3754 if (RT_SUCCESS(rc))
3755 { /* likely */ }
3756 else
3757 break;
3758 }
3759 }
3760#endif
3761 } while (0);
3762
3763 if (RT_SUCCESS(rc))
3764 {
3765 /* Update fExtrn. */
3766 pCtx->fExtrn &= ~fWhat;
3767
3768 /* If everything has been imported, clear the HM keeper bit. */
3769 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3770 {
3771#ifndef IN_NEM_DARWIN
3772 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3773#else
3774 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3775#endif
3776 Assert(!pCtx->fExtrn);
3777 }
3778 }
3779 }
3780#ifndef IN_NEM_DARWIN
3781 else
3782 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3783
3784 /*
3785 * Restore interrupts.
3786 */
3787 ASMSetFlags(fEFlags);
3788#endif
3789
3790 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3791
3792 if (RT_SUCCESS(rc))
3793 { /* likely */ }
3794 else
3795 return rc;
3796
3797 /*
3798 * Honor any pending CR3 updates.
3799 *
3800 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3801 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3802 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3803 *
3804 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3805 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3806 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3807 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3808 *
3809 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3810 *
3811 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3812 */
3813 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3814#ifndef IN_NEM_DARWIN
3815 && VMMRZCallRing3IsEnabled(pVCpu)
3816#endif
3817 )
3818 {
3819 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3820 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3821 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3822 }
3823
3824 return VINF_SUCCESS;
3825}
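
/*
 * Typical usage sketch (illustrative only; the handler identifiers are assumptions): a
 * VM-exit handler that needs the guest RIP, RFLAGS and CS up to date before emulating
 * an instruction would do something like:
 *
 *   int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
 *   AssertRCReturn(rc, rc);
 *
 * Only the parts still marked external in pCtx->fExtrn are actually read back from the
 * VMCS; everything else is already in the guest-CPU context.
 */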
3826
3827
3828/**
3829 * Check per-VM and per-VCPU force flag actions that require us to go back to
3830 * ring-3 for one reason or another.
3831 *
3832 * @returns Strict VBox status code (i.e. informational status codes too)
3833 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3834 * ring-3.
3835 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3836 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3837 * interrupts)
3838 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3839 * all EMTs to be in ring-3.
3840 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
3841 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3842 * to the EM loop.
3843 *
3844 * @param pVCpu The cross context virtual CPU structure.
3845 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
3846 * @param fStepping Whether we are single-stepping the guest using the
3847 * hypervisor debugger.
3848 *
3849 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3850 * is no longer in VMX non-root mode.
3851 */
3852static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3853{
3854#ifndef IN_NEM_DARWIN
3855 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3856#endif
3857
3858 /*
3859 * Update pending interrupts into the APIC's IRR.
3860 */
3861 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3862 APICUpdatePendingInterrupts(pVCpu);
3863
3864 /*
3865 * Anything pending? Should be more likely than not if we're doing a good job.
3866 */
3867 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3868 if ( !fStepping
3869 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3870 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3871 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3872 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3873 return VINF_SUCCESS;
3874
3875    /* Pending PGM CR3 sync. */
3876 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3877 {
3878 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3879 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3880 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3881 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3882 if (rcStrict != VINF_SUCCESS)
3883 {
3884 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3885 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3886 return rcStrict;
3887 }
3888 }
3889
3890 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3891 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3892 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3893 {
3894 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3895 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3896 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3897 return rc;
3898 }
3899
3900 /* Pending VM request packets, such as hardware interrupts. */
3901 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3902 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3903 {
3904 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3905 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3906 return VINF_EM_PENDING_REQUEST;
3907 }
3908
3909 /* Pending PGM pool flushes. */
3910 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3911 {
3912 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3913 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3914 return VINF_PGM_POOL_FLUSH_PENDING;
3915 }
3916
3917 /* Pending DMA requests. */
3918 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3919 {
3920 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3921 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3922 return VINF_EM_RAW_TO_R3;
3923 }
3924
3925#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3926 /*
3927 * Pending nested-guest events.
3928 *
3929     * Please note the priority of these events is specified and important.
3930 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3931 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3932 */
3933 if (fIsNestedGuest)
3934 {
3935 /* Pending nested-guest APIC-write. */
3936 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3937 {
3938 Log4Func(("Pending nested-guest APIC-write\n"));
3939 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3940 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3941 return rcStrict;
3942 }
3943
3944 /* Pending nested-guest monitor-trap flag (MTF). */
3945 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3946 {
3947 Log4Func(("Pending nested-guest MTF\n"));
3948 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3949 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3950 return rcStrict;
3951 }
3952
3953 /* Pending nested-guest VMX-preemption timer expired. */
3954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3955 {
3956 Log4Func(("Pending nested-guest preempt timer\n"));
3957 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3958 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3959 return rcStrict;
3960 }
3961 }
3962#else
3963 NOREF(fIsNestedGuest);
3964#endif
3965
3966 return VINF_SUCCESS;
3967}
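
/*
 * Usage sketch (hypothetical caller): the run loop is expected to invoke this before each
 * VM-entry and bail out to ring-3 for anything other than VINF_SUCCESS, e.g.:
 *
 *   VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, false, fStepping);
 *   if (rcStrict != VINF_SUCCESS)
 *       return rcStrict;
 */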
3968
3969
3970/**
3971 * Converts any TRPM trap into a pending HM event. This is typically used when
3972 * entering from ring-3 (not longjmp returns).
3973 *
3974 * @param pVCpu The cross context virtual CPU structure.
3975 */
3976static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3977{
3978 Assert(TRPMHasTrap(pVCpu));
3979 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3980
3981 uint8_t uVector;
3982 TRPMEVENT enmTrpmEvent;
3983 uint32_t uErrCode;
3984 RTGCUINTPTR GCPtrFaultAddress;
3985 uint8_t cbInstr;
3986 bool fIcebp;
3987
3988 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3989 AssertRC(rc);
3990
3991 uint32_t u32IntInfo;
3992 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3993 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3994
3995 rc = TRPMResetTrap(pVCpu);
3996 AssertRC(rc);
3997 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3998 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3999
4000 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4001}
4002
4003
4004/**
4005 * Converts the pending HM event into a TRPM trap.
4006 *
4007 * @param pVCpu The cross context virtual CPU structure.
4008 */
4009static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4010{
4011 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4012
4013 /* If a trap was already pending, we did something wrong! */
4014 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4015
4016 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4017 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4018 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4019
4020 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4021
4022 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4023 AssertRC(rc);
4024
4025 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4026 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4027
4028 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4029 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4030 else
4031 {
4032 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4033 switch (uVectorType)
4034 {
4035 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4036 TRPMSetTrapDueToIcebp(pVCpu);
4037 RT_FALL_THRU();
4038 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4039 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4040 {
4041 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4042 || ( uVector == X86_XCPT_BP /* INT3 */
4043 || uVector == X86_XCPT_OF /* INTO */
4044 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4045 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4046 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4047 break;
4048 }
4049 }
4050 }
4051
4052 /* We're now done converting the pending event. */
4053 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4054}
4055
4056
4057/**
4058 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4059 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4060 *
4061 * @param pVCpu The cross context virtual CPU structure.
4062 * @param pVmcsInfo The VMCS info. object.
4063 */
4064static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4065{
4066 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4067 {
4068 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4069 {
4070 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4071 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4072 AssertRC(rc);
4073 }
4074    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4075}
4076
4077
4078/**
4079 * Clears the interrupt-window exiting control in the VMCS.
4080 *
4081 * @param pVCpu The cross context virtual CPU structure.
4082 * @param pVmcsInfo The VMCS info. object.
4083 */
4084DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4085{
4086 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4087 {
4088 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4089 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4090 AssertRC(rc);
4091 }
4092}
4093
4094
4095/**
4096 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4097 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4098 *
4099 * @param pVCpu The cross context virtual CPU structure.
4100 * @param pVmcsInfo The VMCS info. object.
4101 */
4102static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4103{
4104 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4105 {
4106 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4107 {
4108 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4109 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4110 AssertRC(rc);
4111 Log4Func(("Setup NMI-window exiting\n"));
4112 }
4113 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4114}
4115
4116
4117/**
4118 * Clears the NMI-window exiting control in the VMCS.
4119 *
4120 * @param pVCpu The cross context virtual CPU structure.
4121 * @param pVmcsInfo The VMCS info. object.
4122 */
4123DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4124{
4125 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4126 {
4127 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4128 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4129 AssertRC(rc);
4130 }
4131}
4132
4133
4134/**
4135 * Injects an event into the guest upon VM-entry by updating the relevant fields
4136 * in the VM-entry area in the VMCS.
4137 *
4138 * @returns Strict VBox status code (i.e. informational status codes too).
4139 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4140 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4141 *
4142 * @param pVCpu The cross context virtual CPU structure.
4143 * @param pVmcsInfo The VMCS info object.
4144 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4145 * @param pEvent The event being injected.
4146 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4147 *                          will be updated if necessary. This cannot be NULL.
4148 * @param fStepping Whether we're single-stepping guest execution and should
4149 * return VINF_EM_DBG_STEPPED if the event is injected
4150 * directly (registers modified by us, not by hardware on
4151 * VM-entry).
4152 */
4153static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4154 bool fStepping, uint32_t *pfIntrState)
4155{
4156 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4157 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4158 Assert(pfIntrState);
4159
4160#ifdef IN_NEM_DARWIN
4161 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4162#endif
4163
4164 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4165 uint32_t u32IntInfo = pEvent->u64IntInfo;
4166 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4167 uint32_t const cbInstr = pEvent->cbInstr;
4168 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4169 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4170 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4171
4172#ifdef VBOX_STRICT
4173 /*
4174 * Validate the error-code-valid bit for hardware exceptions.
4175 * No error codes for exceptions in real-mode.
4176 *
4177 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4178 */
4179 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4180 && !CPUMIsGuestInRealModeEx(pCtx))
4181 {
4182 switch (uVector)
4183 {
4184 case X86_XCPT_PF:
4185 case X86_XCPT_DF:
4186 case X86_XCPT_TS:
4187 case X86_XCPT_NP:
4188 case X86_XCPT_SS:
4189 case X86_XCPT_GP:
4190 case X86_XCPT_AC:
4191 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4192 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4193 RT_FALL_THRU();
4194 default:
4195 break;
4196 }
4197 }
4198
4199 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4200 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4201 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4202#endif
4203
4204 RT_NOREF(uVector);
4205 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4206 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4207 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4208 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4209 {
4210 Assert(uVector <= X86_XCPT_LAST);
4211 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4212 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4213 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4214 }
4215 else
4216 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4217
4218 /*
4219 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4220 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4221 * interrupt handler in the (real-mode) guest.
4222 *
4223 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4224 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4225 */
4226 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4227 {
4228#ifndef IN_NEM_DARWIN
4229 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4230#endif
4231 {
4232 /*
4233 * For CPUs with unrestricted guest execution enabled and with the guest
4234 * in real-mode, we must not set the deliver-error-code bit.
4235 *
4236 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4237 */
4238 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4239 }
4240#ifndef IN_NEM_DARWIN
4241 else
4242 {
4243 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4244 Assert(PDMVmmDevHeapIsEnabled(pVM));
4245 Assert(pVM->hm.s.vmx.pRealModeTSS);
4246 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4247
4248 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4249 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4250 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4251 AssertRCReturn(rc2, rc2);
4252
4253 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4254 size_t const cbIdtEntry = sizeof(X86IDTR16);
4255 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4256 {
4257 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4258 if (uVector == X86_XCPT_DF)
4259 return VINF_EM_RESET;
4260
4261 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4262 No error codes for exceptions in real-mode. */
4263 if (uVector == X86_XCPT_GP)
4264 {
4265 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4266 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4267 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4268 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4269 HMEVENT EventXcptDf;
4270 RT_ZERO(EventXcptDf);
4271 EventXcptDf.u64IntInfo = uXcptDfInfo;
4272 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4273 }
4274
4275 /*
4276 * If we're injecting an event with no valid IDT entry, inject a #GP.
4277 * No error codes for exceptions in real-mode.
4278 *
4279 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4280 */
4281 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4282 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4283 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4284 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4285 HMEVENT EventXcptGp;
4286 RT_ZERO(EventXcptGp);
4287 EventXcptGp.u64IntInfo = uXcptGpInfo;
4288 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4289 }
4290
4291 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4292 uint16_t uGuestIp = pCtx->ip;
4293 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4294 {
4295 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4296 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4297 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4298 }
4299 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4300 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4301
4302 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4303 X86IDTR16 IdtEntry;
4304 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4305 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4306 AssertRCReturn(rc2, rc2);
4307
4308 /* Construct the stack frame for the interrupt/exception handler. */
4309 VBOXSTRICTRC rcStrict;
4310 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4311 if (rcStrict == VINF_SUCCESS)
4312 {
4313 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4314 if (rcStrict == VINF_SUCCESS)
4315 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4316 }
4317
4318 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4319 if (rcStrict == VINF_SUCCESS)
4320 {
4321 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4322 pCtx->rip = IdtEntry.offSel;
4323 pCtx->cs.Sel = IdtEntry.uSel;
4324 pCtx->cs.ValidSel = IdtEntry.uSel;
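                /* Note: cbIdtEntry is sizeof(X86IDTR16), i.e. 4, so the shift below is effectively
                   'selector << 4', the standard real-mode segment base. */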
4325 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4326 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4327 && uVector == X86_XCPT_PF)
4328 pCtx->cr2 = GCPtrFault;
4329
4330 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4331 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4332 | HM_CHANGED_GUEST_RSP);
4333
4334 /*
4335 * If we delivered a hardware exception (other than an NMI) and if there was
4336 * block-by-STI in effect, we should clear it.
4337 */
4338 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4339 {
4340 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4341 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4342 Log4Func(("Clearing inhibition due to STI\n"));
4343 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4344 }
4345
4346 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4347 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4348
4349 /*
4350 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4351 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4352 */
4353 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4354
4355 /*
4356 * If we eventually support nested-guest execution without unrestricted guest execution,
4357 * we should set fInterceptEvents here.
4358 */
4359 Assert(!fIsNestedGuest);
4360
4361 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4362 if (fStepping)
4363 rcStrict = VINF_EM_DBG_STEPPED;
4364 }
4365 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4366 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4367 return rcStrict;
4368 }
4369#else
4370 RT_NOREF(pVmcsInfo);
4371#endif
4372 }
4373
4374 /*
4375 * Validate.
4376 */
4377 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4378 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4379
4380 /*
4381 * Inject the event into the VMCS.
4382 */
4383 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4384 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4385 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4386 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4387 AssertRC(rc);
4388
4389 /*
4390 * Update guest CR2 if this is a page-fault.
4391 */
4392 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4393 pCtx->cr2 = GCPtrFault;
4394
4395 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4396 return VINF_SUCCESS;
4397}
4398
4399
4400/**
4401 * Evaluates the event to be delivered to the guest and sets it as the pending
4402 * event.
4403 *
4404 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4405 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4406 * NOT restore these force-flags.
4407 *
4408 * @returns Strict VBox status code (i.e. informational status codes too).
4409 * @param pVCpu The cross context virtual CPU structure.
4410 * @param pVmcsInfo The VMCS information structure.
4411 * @param   fIsNestedGuest  Flag whether the evaluation happens for a nested guest.
4412 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4413 */
4414static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4415{
4416 Assert(pfIntrState);
4417 Assert(!TRPMHasTrap(pVCpu));
4418
4419 /*
4420 * Compute/update guest-interruptibility state related FFs.
4421 * The FFs will be used below while evaluating events to be injected.
4422 */
4423 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4424
4425 /*
4426 * Evaluate if a new event needs to be injected.
4427 * An event that's already pending has already performed all necessary checks.
4428 */
4429 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4430 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4431 {
4432 /** @todo SMI. SMIs take priority over NMIs. */
4433
4434 /*
4435 * NMIs.
4436 * NMIs take priority over external interrupts.
4437 */
4438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4439 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4440#endif
4441 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4442 {
4443 /*
4444 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4445 *
4446 * For a nested-guest, the FF always indicates the outer guest's ability to
4447 * receive an NMI while the guest-interruptibility state bit depends on whether
4448 * the nested-hypervisor is using virtual-NMIs.
4449 */
4450 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4451 {
4452#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4453 if ( fIsNestedGuest
4454 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4455 return IEMExecVmxVmexitXcptNmi(pVCpu);
4456#endif
4457 vmxHCSetPendingXcptNmi(pVCpu);
4458 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4459 Log4Func(("NMI pending injection\n"));
4460
4461 /* We've injected the NMI, bail. */
4462 return VINF_SUCCESS;
4463 }
4464 else if (!fIsNestedGuest)
4465 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4466 }
4467
4468 /*
4469 * External interrupts (PIC/APIC).
4470 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4471 * We cannot re-request the interrupt from the controller.
4472 */
4473 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4474 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4475 {
4476 Assert(!DBGFIsStepping(pVCpu));
4477 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4478 AssertRC(rc);
4479
4480 /*
4481 * We must not check EFLAGS directly when executing a nested-guest, use
4482 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4483 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4484 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4485 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4486 *
4487 * See Intel spec. 25.4.1 "Event Blocking".
4488 */
4489 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4490 {
4491#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4492 if ( fIsNestedGuest
4493 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4494 {
4495 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4496 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4497 return rcStrict;
4498 }
4499#endif
4500 uint8_t u8Interrupt;
4501 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4502 if (RT_SUCCESS(rc))
4503 {
4504#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4505 if ( fIsNestedGuest
4506 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4507 {
4508 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4509 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4510 return rcStrict;
4511 }
4512#endif
4513 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4514 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4515 }
4516 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4517 {
4518 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4519
4520 if ( !fIsNestedGuest
4521 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4522 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4523 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
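                    /* Note: the TPR threshold is the masked interrupt's priority class (vector >> 4), so a
                       TPR-below-threshold VM-exit fires once the guest lowers its TPR far enough to
                       accept the interrupt. */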
4524
4525 /*
4526 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4527 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4528 * need to re-set this force-flag here.
4529 */
4530 }
4531 else
4532 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4533
4534 /* We've injected the interrupt or taken necessary action, bail. */
4535 return VINF_SUCCESS;
4536 }
4537 if (!fIsNestedGuest)
4538 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4539 }
4540 }
4541 else if (!fIsNestedGuest)
4542 {
4543 /*
4544 * An event is being injected or we are in an interrupt shadow. Check if another event is
4545 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4546 * the pending event.
4547 */
4548 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4549 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4550 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4551 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4552 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4553 }
4554 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4555
4556 return VINF_SUCCESS;
4557}
4558
4559
4560/**
4561 * Injects any pending events into the guest if the guest is in a state to
4562 * receive them.
4563 *
4564 * @returns Strict VBox status code (i.e. informational status codes too).
4565 * @param pVCpu The cross context virtual CPU structure.
4566 * @param pVmcsInfo The VMCS information structure.
4567 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4568 * @param fIntrState The VT-x guest-interruptibility state.
4569 * @param fStepping Whether we are single-stepping the guest using the
4570 * hypervisor debugger and should return
4571 * VINF_EM_DBG_STEPPED if the event was dispatched
4572 * directly.
4573 */
4574static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4575 uint32_t fIntrState, bool fStepping)
4576{
4577 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4578#ifndef IN_NEM_DARWIN
4579 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4580#endif
4581
4582#ifdef VBOX_STRICT
4583 /*
4584 * Verify guest-interruptibility state.
4585 *
4586 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4587 * since injecting an event may modify the interruptibility state and we must thus always
4588 * use fIntrState.
4589 */
4590 {
4591 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4592 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4593 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4594 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4595 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4596 Assert(!TRPMHasTrap(pVCpu));
4597 NOREF(fBlockMovSS); NOREF(fBlockSti);
4598 }
4599#endif
4600
4601 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4602 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4603 {
4604 /*
4605 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4606 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4607 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4608 *
4609 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4610 */
4611 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4612#ifdef VBOX_STRICT
4613 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4614 {
4615 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4616 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4617 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4618 }
4619 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4620 {
4621 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4622 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4623 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4624 }
4625#endif
4626 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4627 uIntType));
4628
4629 /*
4630 * Inject the event and get any changes to the guest-interruptibility state.
4631 *
4632 * The guest-interruptibility state may need to be updated if we inject the event
4633 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4634 */
4635 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4636 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4637
4638 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4639 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4640 else
4641 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4642 }
4643
4644 /*
4645 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4646 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4647 */
4648 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4649 && !fIsNestedGuest)
4650 {
4651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4652
4653 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4654 {
4655 /*
4656 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4657 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4658 */
4659 Assert(!DBGFIsStepping(pVCpu));
4660 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4661 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4662 AssertRC(rc);
4663 }
4664 else
4665 {
4666 /*
4667 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4668 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4669 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4670 * we use MTF, so just make sure it's called before executing guest-code.
4671 */
4672 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4673 }
4674 }
4675 /* else: for nested-guests this is currently handled while merging VMCS controls. */
4676
4677 /*
4678 * Finally, update the guest-interruptibility state.
4679 *
4680 * This is required for the real-on-v86 software interrupt injection, for
4681 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4682 */
4683 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4684 AssertRC(rc);
4685
4686 /*
4687 * There's no need to clear the VM-entry interruption-information field here if we're not
4688 * injecting anything. VT-x clears the valid bit on every VM-exit.
4689 *
4690 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4691 */
4692
4693 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4694 return rcStrict;
4695}
4696
4697
4698/**
4699 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4700 * and update error record fields accordingly.
4701 *
4702 * @returns VMX_IGS_* error codes.
4703 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4704 * wrong with the guest state.
4705 *
4706 * @param pVCpu The cross context virtual CPU structure.
4707 * @param pVmcsInfo The VMCS info. object.
4708 *
4709 * @remarks This function assumes our cache of the VMCS controls
4710 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4711 */
4712static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4713{
4714#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4715#define HMVMX_CHECK_BREAK(expr, err) do { \
4716 if (!(expr)) { uError = (err); break; } \
4717 } while (0)
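/* Both macros rely on being expanded inside the do { ... } while (0) loop in the function body below,
   so that 'break' skips the remaining checks and falls through to the error-recording code. */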
4718
4719 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4720 uint32_t uError = VMX_IGS_ERROR;
4721 uint32_t u32IntrState = 0;
4722#ifndef IN_NEM_DARWIN
4723 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4724 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4725#else
4726 bool const fUnrestrictedGuest = true;
4727#endif
4728 do
4729 {
4730 int rc;
4731
4732 /*
4733 * Guest-interruptibility state.
4734 *
4735 * Read this first so that any check failing prior to those that actually
4736 * require the guest-interruptibility state still reflects the correct
4737 * VMCS value, avoiding further confusion.
4738 */
4739 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4740 AssertRC(rc);
4741
4742 uint32_t u32Val;
4743 uint64_t u64Val;
4744
4745 /*
4746 * CR0.
4747 */
4748 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
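        /* Note: IA32_VMX_CR0_FIXED0 has a 1 for every CR0 bit that must be 1, while IA32_VMX_CR0_FIXED1
           has a 0 for every CR0 bit that must be 0. ANDing them thus yields the mandatory-one mask and
           ORing them the allowed-one mask; see Intel spec. Appendix A.7 "VMX-Fixed Bits in CR0". */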
4749 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4750 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4751 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4752 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4753 if (fUnrestrictedGuest)
4754 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4755
4756 uint64_t u64GuestCr0;
4757 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4758 AssertRC(rc);
4759 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4760 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4761 if ( !fUnrestrictedGuest
4762 && (u64GuestCr0 & X86_CR0_PG)
4763 && !(u64GuestCr0 & X86_CR0_PE))
4764 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4765
4766 /*
4767 * CR4.
4768 */
4769 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
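        /* Same fixed-bit semantics as for CR0 above; see Intel spec. Appendix A.8 "VMX-Fixed Bits in CR4". */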
4770 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4771 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4772
4773 uint64_t u64GuestCr4;
4774 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4775 AssertRC(rc);
4776 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4777 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4778
4779 /*
4780 * IA32_DEBUGCTL MSR.
4781 */
4782 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4783 AssertRC(rc);
4784 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4785 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4786 {
4787 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4788 }
4789 uint64_t u64DebugCtlMsr = u64Val;
4790
4791#ifdef VBOX_STRICT
4792 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4793 AssertRC(rc);
4794 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4795#endif
4796 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4797
4798 /*
4799 * RIP and RFLAGS.
4800 */
4801 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4802 AssertRC(rc);
4803 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4804 if ( !fLongModeGuest
4805 || !pCtx->cs.Attr.n.u1Long)
4806 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4807 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4808 * must be identical if the "IA-32e mode guest" VM-entry
4809 * control is 1 and CS.L is 1. No check applies if the
4810 * CPU supports 64 linear-address bits. */
4811
4812 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4813 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4814 AssertRC(rc);
4815 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4816 VMX_IGS_RFLAGS_RESERVED);
4817 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4818 uint32_t const u32Eflags = u64Val;
4819
4820 if ( fLongModeGuest
4821 || ( fUnrestrictedGuest
4822 && !(u64GuestCr0 & X86_CR0_PE)))
4823 {
4824 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4825 }
4826
4827 uint32_t u32EntryInfo;
4828 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4829 AssertRC(rc);
4830 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4831 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4832
4833 /*
4834 * 64-bit checks.
4835 */
4836 if (fLongModeGuest)
4837 {
4838 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4839 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4840 }
4841
4842 if ( !fLongModeGuest
4843 && (u64GuestCr4 & X86_CR4_PCIDE))
4844 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4845
4846 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4847 * 51:32 beyond the processor's physical-address width are 0. */
4848
4849 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4850 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4851 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4852
4853#ifndef IN_NEM_DARWIN
4854 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4855 AssertRC(rc);
4856 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4857
4858 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4859 AssertRC(rc);
4860 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4861#endif
4862
4863 /*
4864 * PERF_GLOBAL MSR.
4865 */
4866 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4867 {
4868 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4869 AssertRC(rc);
4870 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4871 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4872 }
4873
4874 /*
4875 * PAT MSR.
4876 */
4877 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4878 {
4879 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4880 AssertRC(rc);
4881 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4882 for (unsigned i = 0; i < 8; i++)
4883 {
4884 uint8_t u8Val = (u64Val & 0xff);
4885 if ( u8Val != 0 /* UC */
4886 && u8Val != 1 /* WC */
4887 && u8Val != 4 /* WT */
4888 && u8Val != 5 /* WP */
4889 && u8Val != 6 /* WB */
4890 && u8Val != 7 /* UC- */)
4891 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4892 u64Val >>= 8;
4893 }
4894 }
4895
4896 /*
4897 * EFER MSR.
4898 */
4899 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4900 {
4901 Assert(g_fHmVmxSupportsVmcsEfer);
4902 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4903 AssertRC(rc);
4904 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4905 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4906 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4907 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4908 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4909 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4910 * iemVmxVmentryCheckGuestState(). */
4911 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4912 || !(u64GuestCr0 & X86_CR0_PG)
4913 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4914 VMX_IGS_EFER_LMA_LME_MISMATCH);
4915 }
4916
4917 /*
4918 * Segment registers.
4919 */
4920 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4921 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4922 if (!(u32Eflags & X86_EFL_VM))
4923 {
4924 /* CS */
4925 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4926 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4927 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4928 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4929 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4930 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4931 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4932 /* CS cannot be loaded with NULL in protected mode. */
4933 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4934 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4935 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4936 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4937 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4938 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4939 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4940 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4941 else
4942 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4943
4944 /* SS */
4945 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4946 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4947 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4948 if ( !(pCtx->cr0 & X86_CR0_PE)
4949 || pCtx->cs.Attr.n.u4Type == 3)
4950 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4951
4952 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4953 {
4954 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4955 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4956 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4957 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4958 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4959 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4960 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4961 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4962 }
4963
4964 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4965 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4966 {
4967 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4968 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4969 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4970 || pCtx->ds.Attr.n.u4Type > 11
4971 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4972 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4973 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4974 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4975 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4976 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4977 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4978 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4979 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4980 }
4981 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4982 {
4983 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4984 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4985 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4986 || pCtx->es.Attr.n.u4Type > 11
4987 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4988 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4989 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4990 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4991 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4992 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4993 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4994 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4995 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4996 }
4997 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4998 {
4999 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5000 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5001 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5002 || pCtx->fs.Attr.n.u4Type > 11
5003 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5004 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5005 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5006 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5007 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5008 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5009 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5010 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5011 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5012 }
5013 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5014 {
5015 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5016 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5017 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5018 || pCtx->gs.Attr.n.u4Type > 11
5019 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5020 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5021 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5022 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5023 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5024 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5025 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5026 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5027 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5028 }
5029 /* 64-bit capable CPUs. */
5030 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5031 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5032 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5033 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5034 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5035 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5036 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5037 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5038 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5039 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5040 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5041 }
5042 else
5043 {
5044 /* V86 mode checks. */
5045 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5046 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5047 {
5048 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5049 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5050 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5051 }
5052 else
5053 {
5054 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5055 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5056 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5057 }
5058
5059 /* CS */
5060 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5061 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5062 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5063 /* SS */
5064 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5065 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5066 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5067 /* DS */
5068 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5069 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5070 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5071 /* ES */
5072 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5073 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5074 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5075 /* FS */
5076 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5077 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5078 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5079 /* GS */
5080 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5081 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5082 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5083 /* 64-bit capable CPUs. */
5084 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5085 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5086 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5087 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5088 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5089 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5090 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5091 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5092 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5093 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5094 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5095 }
5096
5097 /*
5098 * TR.
5099 */
5100 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5101 /* 64-bit capable CPUs. */
5102 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5103 if (fLongModeGuest)
5104 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5105 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5106 else
5107 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5108 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5109 VMX_IGS_TR_ATTR_TYPE_INVALID);
5110 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5111 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5112 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5113 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5114 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5115 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5116 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5117 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5118
5119 /*
5120 * GDTR and IDTR (64-bit capable checks).
5121 */
5122 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5123 AssertRC(rc);
5124 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5125
5126 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5127 AssertRC(rc);
5128 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5129
5130 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5131 AssertRC(rc);
5132 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5133
5134 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5135 AssertRC(rc);
5136 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5137
5138 /*
5139 * Guest Non-Register State.
5140 */
5141 /* Activity State. */
5142 uint32_t u32ActivityState;
5143 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5144 AssertRC(rc);
5145 HMVMX_CHECK_BREAK( !u32ActivityState
5146 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5147 VMX_IGS_ACTIVITY_STATE_INVALID);
5148 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5149 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5150
5151 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5152 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5153 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5154
5155 /** @todo Activity state and injecting interrupts. Left as a todo since we
5156 * currently don't use any activity state other than ACTIVE. */
5157
5158 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5159 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5160
5161 /* Guest interruptibility-state. */
5162 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5163 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5164 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5165 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5166 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5167 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5168 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5169 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5170 {
5171 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5172 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5173 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5174 }
5175 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5176 {
5177 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5178 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5179 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5180 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5181 }
5182 /** @todo Assumes the processor is not in SMM. */
5183 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5184 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5185 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5186 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5187 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5188 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5189 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5190 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5191
5192 /* Pending debug exceptions. */
5193 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5194 AssertRC(rc);
5195 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5196 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5197 u32Val = u64Val; /* For pending debug exceptions checks below. */
5198
5199 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5200 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5201 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5202 {
5203 if ( (u32Eflags & X86_EFL_TF)
5204 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5205 {
5206 /* Bit 14 is PendingDebug.BS. */
5207 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5208 }
5209 if ( !(u32Eflags & X86_EFL_TF)
5210 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5211 {
5212 /* Bit 14 is PendingDebug.BS. */
5213 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5214 }
5215 }
5216
5217#ifndef IN_NEM_DARWIN
5218 /* VMCS link pointer. */
5219 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5220 AssertRC(rc);
5221 if (u64Val != UINT64_C(0xffffffffffffffff))
5222 {
5223 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5224 /** @todo Bits beyond the processor's physical-address width MBZ. */
5225 /** @todo SMM checks. */
5226 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5227 Assert(pVmcsInfo->pvShadowVmcs);
5228 VMXVMCSREVID VmcsRevId;
5229 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5230 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5231 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5232 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5233 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5234 }
5235
5236 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5237 * not using nested paging? */
5238 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5239 && !fLongModeGuest
5240 && CPUMIsGuestInPAEModeEx(pCtx))
5241 {
5242 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5243 AssertRC(rc);
5244 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5245
5246 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5247 AssertRC(rc);
5248 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5249
5250 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5251 AssertRC(rc);
5252 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5253
5254 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5255 AssertRC(rc);
5256 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5257 }
5258#endif
5259
5260 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5261 if (uError == VMX_IGS_ERROR)
5262 uError = VMX_IGS_REASON_NOT_FOUND;
5263 } while (0);
5264
5265 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5266 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5267 return uError;
5268
5269#undef HMVMX_ERROR_BREAK
5270#undef HMVMX_CHECK_BREAK
5271}
5272
5273
5274#ifndef HMVMX_USE_FUNCTION_TABLE
5275/**
5276 * Handles a guest VM-exit from hardware-assisted VMX execution.
5277 *
5278 * @returns Strict VBox status code (i.e. informational status codes too).
5279 * @param pVCpu The cross context virtual CPU structure.
5280 * @param pVmxTransient The VMX-transient structure.
5281 */
5282DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5283{
5284#ifdef DEBUG_ramshankar
5285# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5286 do { \
5287 if (a_fSave != 0) \
5288 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5289 VBOXSTRICTRC rcStrict = a_CallExpr; \
5290 if (a_fSave != 0) \
5291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5292 return rcStrict; \
5293 } while (0)
5294#else
5295# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5296#endif
5297 uint32_t const uExitReason = pVmxTransient->uExitReason;
5298 switch (uExitReason)
5299 {
5300 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5301 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5302 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5303 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5304 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5305 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5306 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5307 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5308 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5309 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5310 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5311 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5312 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5313 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5314 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5315 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5316 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5317 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5318 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5319 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5320 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5321 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5322 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5323 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5324 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5325 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5326 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5327 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5328 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5329 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5330#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5331 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5332 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5333 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5334 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5335 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5336 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5337 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5338 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5339 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5340 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5341#else
5342 case VMX_EXIT_VMCLEAR:
5343 case VMX_EXIT_VMLAUNCH:
5344 case VMX_EXIT_VMPTRLD:
5345 case VMX_EXIT_VMPTRST:
5346 case VMX_EXIT_VMREAD:
5347 case VMX_EXIT_VMRESUME:
5348 case VMX_EXIT_VMWRITE:
5349 case VMX_EXIT_VMXOFF:
5350 case VMX_EXIT_VMXON:
5351 case VMX_EXIT_INVVPID:
5352 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5353#endif
5354#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5355 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5356#else
5357 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5358#endif
5359
5360 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5361 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5362 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5363
5364 case VMX_EXIT_INIT_SIGNAL:
5365 case VMX_EXIT_SIPI:
5366 case VMX_EXIT_IO_SMI:
5367 case VMX_EXIT_SMI:
5368 case VMX_EXIT_ERR_MSR_LOAD:
5369 case VMX_EXIT_ERR_MACHINE_CHECK:
5370 case VMX_EXIT_PML_FULL:
5371 case VMX_EXIT_VIRTUALIZED_EOI:
5372 case VMX_EXIT_GDTR_IDTR_ACCESS:
5373 case VMX_EXIT_LDTR_TR_ACCESS:
5374 case VMX_EXIT_APIC_WRITE:
5375 case VMX_EXIT_RDRAND:
5376 case VMX_EXIT_RSM:
5377 case VMX_EXIT_VMFUNC:
5378 case VMX_EXIT_ENCLS:
5379 case VMX_EXIT_RDSEED:
5380 case VMX_EXIT_XSAVES:
5381 case VMX_EXIT_XRSTORS:
5382 case VMX_EXIT_UMWAIT:
5383 case VMX_EXIT_TPAUSE:
5384 case VMX_EXIT_LOADIWKEY:
5385 default:
5386 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5387 }
5388#undef VMEXIT_CALL_RET
5389}
5390#endif /* !HMVMX_USE_FUNCTION_TABLE */
5391
5392
5393#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5394/**
5395 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5396 *
5397 * @returns Strict VBox status code (i.e. informational status codes too).
5398 * @param pVCpu The cross context virtual CPU structure.
5399 * @param pVmxTransient The VMX-transient structure.
5400 */
5401DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5402{
5403 uint32_t const uExitReason = pVmxTransient->uExitReason;
5404 switch (uExitReason)
5405 {
5406# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5407 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5408 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5409# else
5410 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5411 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5412# endif
5413 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5414 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5415 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5416
5417 /*
5418 * We shouldn't direct host physical interrupts to the nested-guest.
5419 */
5420 case VMX_EXIT_EXT_INT:
5421 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5422
5423 /*
5424 * Instructions that cause VM-exits unconditionally or where the condition is
5425 * taken solely from the nested hypervisor (meaning if the VM-exit
5426 * happens, it's guaranteed to be a nested-guest VM-exit).
5427 *
5428 * - Provides VM-exit instruction length ONLY.
5429 */
5430 case VMX_EXIT_CPUID: /* Unconditional. */
5431 case VMX_EXIT_VMCALL:
5432 case VMX_EXIT_GETSEC:
5433 case VMX_EXIT_INVD:
5434 case VMX_EXIT_XSETBV:
5435 case VMX_EXIT_VMLAUNCH:
5436 case VMX_EXIT_VMRESUME:
5437 case VMX_EXIT_VMXOFF:
5438 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5439 case VMX_EXIT_VMFUNC:
5440 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5441
5442 /*
5443 * Instructions that cause VM-exits unconditionally or where the condition is
5444 * taken solely from the nested hypervisor (meaning if the VM-exit
5445 * happens, it's guaranteed to be a nested-guest VM-exit).
5446 *
5447 * - Provides VM-exit instruction length.
5448 * - Provides VM-exit information.
5449 * - Optionally provides Exit qualification.
5450 *
5451 * Since Exit qualification is 0 for all VM-exits where it is not
5452 * applicable, reading and passing it to the guest should produce
5453 * defined behavior.
5454 *
5455 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5456 */
5457 case VMX_EXIT_INVEPT: /* Unconditional. */
5458 case VMX_EXIT_INVVPID:
5459 case VMX_EXIT_VMCLEAR:
5460 case VMX_EXIT_VMPTRLD:
5461 case VMX_EXIT_VMPTRST:
5462 case VMX_EXIT_VMXON:
5463 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5464 case VMX_EXIT_LDTR_TR_ACCESS:
5465 case VMX_EXIT_RDRAND:
5466 case VMX_EXIT_RDSEED:
5467 case VMX_EXIT_XSAVES:
5468 case VMX_EXIT_XRSTORS:
5469 case VMX_EXIT_UMWAIT:
5470 case VMX_EXIT_TPAUSE:
5471 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5472
5473 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5475 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5482 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5485 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5492
5493 case VMX_EXIT_PREEMPT_TIMER:
5494 {
5495 /** @todo NSTVMX: Preempt timer. */
5496 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5497 }
5498
5499 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5500 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5501
5502 case VMX_EXIT_VMREAD:
5503 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5504
5505 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5506 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5507
5508 case VMX_EXIT_INIT_SIGNAL:
5509 case VMX_EXIT_SIPI:
5510 case VMX_EXIT_IO_SMI:
5511 case VMX_EXIT_SMI:
5512 case VMX_EXIT_ERR_MSR_LOAD:
5513 case VMX_EXIT_ERR_MACHINE_CHECK:
5514 case VMX_EXIT_PML_FULL:
5515 case VMX_EXIT_RSM:
5516 default:
5517 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5518 }
5519}
5520#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5521
5522
5523/** @name VM-exit helpers.
5524 * @{
5525 */
5526/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5527/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5528/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5529
5530/** Macro for VM-exits called unexpectedly. */
5531#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5532 do { \
5533 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5534 return VERR_VMX_UNEXPECTED_EXIT; \
5535 } while (0)
5536
5537#ifdef VBOX_STRICT
5538# ifndef IN_NEM_DARWIN
5539/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5540# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5541 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5542
5543# define HMVMX_ASSERT_PREEMPT_CPUID() \
5544 do { \
5545 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5546 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5547 } while (0)
5548
5549# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5550 do { \
5551 AssertPtr((a_pVCpu)); \
5552 AssertPtr((a_pVmxTransient)); \
5553 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5554 Assert((a_pVmxTransient)->pVmcsInfo); \
5555 Assert(ASMIntAreEnabled()); \
5556 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5557 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5558 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5559 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5560 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5561 HMVMX_ASSERT_PREEMPT_CPUID(); \
5562 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5563 } while (0)
5564# else
5565# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5566# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5567# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5568 do { \
5569 AssertPtr((a_pVCpu)); \
5570 AssertPtr((a_pVmxTransient)); \
5571 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5572 Assert((a_pVmxTransient)->pVmcsInfo); \
5573 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5574 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5575 } while (0)
5576# endif
5577
5578# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5579 do { \
5580 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5581 Assert((a_pVmxTransient)->fIsNestedGuest); \
5582 } while (0)
5583
5584# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5585 do { \
5586 Log4Func(("\n")); \
5587 } while (0)
5588#else
5589# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5590 do { \
5591 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5592 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5593 } while (0)
5594
5595# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5596 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5597
5598# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5599#endif
5600
5601#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5602/** Macro that does the necessary privilege checks and intercepted VM-exits for
5603 * guests that attempted to execute a VMX instruction. */
5604# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5605 do \
5606 { \
5607 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5608 if (rcStrictTmp == VINF_SUCCESS) \
5609 { /* likely */ } \
5610 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5611 { \
5612 Assert((a_pVCpu)->hm.s.Event.fPending); \
5613 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5614 return VINF_SUCCESS; \
5615 } \
5616 else \
5617 { \
5618 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5619 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5620 } \
5621 } while (0)
5622
5623/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5624# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5625 do \
5626 { \
5627 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5628 (a_pGCPtrEffAddr)); \
5629 if (rcStrictTmp == VINF_SUCCESS) \
5630 { /* likely */ } \
5631 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5632 { \
5633 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5634 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5635 NOREF(uXcptTmp); \
5636 return VINF_SUCCESS; \
5637 } \
5638 else \
5639 { \
5640 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5641 return rcStrictTmp; \
5642 } \
5643 } while (0)
5644#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5645
5646
5647/**
5648 * Advances the guest RIP by the specified number of bytes.
5649 *
5650 * @param pVCpu The cross context virtual CPU structure.
5651 * @param cbInstr Number of bytes to advance the RIP by.
5652 *
5653 * @remarks No-long-jump zone!!!
5654 */
5655DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5656{
5657 /* Advance the RIP. */
5658 pVCpu->cpum.GstCtx.rip += cbInstr;
5659 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5660
5661 /* Update interrupt inhibition. */
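    /* The STI/MOV SS shadow only covers the instruction at the recorded PC; once RIP has moved past it
       the inhibition no longer applies and the force-flag can be cleared. */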
5662 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5663 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5664 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5665}
5666
5667
5668/**
5669 * Advances the guest RIP after reading it from the VMCS.
5670 *
5671 * @returns VBox status code, no informational status codes.
5672 * @param pVCpu The cross context virtual CPU structure.
5673 * @param pVmxTransient The VMX-transient structure.
5674 *
5675 * @remarks No-long-jump zone!!!
5676 */
5677static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5678{
5679 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5680 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5681 AssertRCReturn(rc, rc);
5682
5683 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5684 return VINF_SUCCESS;
5685}
5686
5687
5688/**
5689 * Handles a condition that occurred while delivering an event through the guest or
5690 * nested-guest IDT.
5691 *
5692 * @returns Strict VBox status code (i.e. informational status codes too).
5693 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5694 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5695 * to continue execution of the guest which will deliver the \#DF.
5696 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5697 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5698 *
5699 * @param pVCpu The cross context virtual CPU structure.
5700 * @param pVmxTransient The VMX-transient structure.
5701 *
5702 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5703 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5704 * is due to an EPT violation, PML full or SPP-related event.
5705 *
5706 * @remarks No-long-jump zone!!!
5707 */
5708static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5709{
5710 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5711 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5712 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5713 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5714 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5715 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5716
5717 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5718 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5719 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5720 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5721 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5722 {
5723 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5724 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5725
5726 /*
5727 * If the event was a software interrupt (generated with INT n) or a software exception
5728 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5729 * can handle the VM-exit and continue guest execution which will re-execute the
5730 * instruction rather than re-injecting the exception, as that can cause premature
5731 * trips to ring-3 before injection and involve TRPM which currently has no way of
5732 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5733 * the problem).
5734 */
5735 IEMXCPTRAISE enmRaise;
5736 IEMXCPTRAISEINFO fRaiseInfo;
5737 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5738 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5739 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5740 {
5741 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5742 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5743 }
5744 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5745 {
5746 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5747 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5748 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5749
5750 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5751 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5752
5753 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5754
5755 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5756 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5757 {
5758 pVmxTransient->fVectoringPF = true;
5759 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5760 }
5761 }
5762 else
5763 {
5764 /*
5765 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5766 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5767 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5768 */
5769 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5770 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5771 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5772 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5773 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5774 }
5775
5776 /*
5777 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5778 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5779 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5780 * subsequent VM-entry would fail, see @bugref{7445}.
5781 *
5782 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5783 */
5784 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5785 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5786 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5787 && CPUMIsGuestNmiBlocking(pVCpu))
5788 {
5789 CPUMSetGuestNmiBlocking(pVCpu, false);
5790 }
5791
5792 switch (enmRaise)
5793 {
5794 case IEMXCPTRAISE_CURRENT_XCPT:
5795 {
5796 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5797 Assert(rcStrict == VINF_SUCCESS);
5798 break;
5799 }
5800
5801 case IEMXCPTRAISE_PREV_EVENT:
5802 {
5803 uint32_t u32ErrCode;
5804 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5805 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5806 else
5807 u32ErrCode = 0;
5808
5809 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5810 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5811 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5812 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5813
5814 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5815 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5816 Assert(rcStrict == VINF_SUCCESS);
5817 break;
5818 }
5819
5820 case IEMXCPTRAISE_REEXEC_INSTR:
5821 Assert(rcStrict == VINF_SUCCESS);
5822 break;
5823
5824 case IEMXCPTRAISE_DOUBLE_FAULT:
5825 {
5826 /*
5827 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5828 * second #PF as a guest #PF (and not a shadow #PF), in which case it needs to be converted into a #DF.
5829 */
5830 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5831 {
5832 pVmxTransient->fVectoringDoublePF = true;
5833 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5834 pVCpu->cpum.GstCtx.cr2));
5835 rcStrict = VINF_SUCCESS;
5836 }
5837 else
5838 {
5839 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5840 vmxHCSetPendingXcptDF(pVCpu);
5841 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5842 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5843 rcStrict = VINF_HM_DOUBLE_FAULT;
5844 }
5845 break;
5846 }
5847
5848 case IEMXCPTRAISE_TRIPLE_FAULT:
5849 {
5850 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5851 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5852 rcStrict = VINF_EM_RESET;
5853 break;
5854 }
5855
5856 case IEMXCPTRAISE_CPU_HANG:
5857 {
5858 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5859 rcStrict = VERR_EM_GUEST_CPU_HANG;
5860 break;
5861 }
5862
5863 default:
5864 {
5865 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5866 rcStrict = VERR_VMX_IPE_2;
5867 break;
5868 }
5869 }
5870 }
5871 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5872 && !CPUMIsGuestNmiBlocking(pVCpu))
5873 {
5874 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5875 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5876 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5877 {
5878 /*
5879 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5880 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5881 * that virtual NMIs remain blocked until the IRET execution is completed.
5882 *
5883 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5884 */
5885 CPUMSetGuestNmiBlocking(pVCpu, true);
5886 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5887 }
5888 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5889 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5890 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5891 {
5892 /*
5893 * Execution of IRET caused an EPT violation, page-modification log-full event or
5894 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5895 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5896 * that virtual NMIs remain blocked until the IRET execution is completed.
5897 *
5898 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5899 */
5900 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5901 {
5902 CPUMSetGuestNmiBlocking(pVCpu, true);
5903 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5904 }
5905 }
5906 }
5907
5908 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5909 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5910 return rcStrict;
5911}
5912
5913
5914#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5915/**
5916 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
5917 * guest attempting to execute a VMX instruction.
5918 *
5919 * @returns Strict VBox status code (i.e. informational status codes too).
5920 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5921 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5922 *
5923 * @param pVCpu The cross context virtual CPU structure.
5924 * @param uExitReason The VM-exit reason.
5925 *
5926 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5927 * @remarks No-long-jump zone!!!
5928 */
5929static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5930{
5931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5932 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5933
5934 /*
5935 * The physical CPU would have already checked the CPU mode/code segment.
5936 * We shall just assert here for paranoia.
5937 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5938 */
5939 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5940 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5941 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5942
5943 if (uExitReason == VMX_EXIT_VMXON)
5944 {
5945 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5946
5947 /*
5948 * We check CR4.VMXE because it is required to be always set while in VMX operation
5949 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5950 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5951 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5952 */
5953 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5954 {
5955 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5956 vmxHCSetPendingXcptUD(pVCpu);
5957 return VINF_HM_PENDING_XCPT;
5958 }
5959 }
5960 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5961 {
5962 /*
5963 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5964 * (other than VMXON), so we need to raise a #UD.
5965 */
5966 Log4Func(("Not in VMX root mode -> #UD\n"));
5967 vmxHCSetPendingXcptUD(pVCpu);
5968 return VINF_HM_PENDING_XCPT;
5969 }
5970
5971 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5972 return VINF_SUCCESS;
5973}
5974
5975
5976/**
5977 * Decodes the memory operand of an instruction that caused a VM-exit.
5978 *
5979 * The Exit qualification field provides the displacement field for memory
5980 * operand instructions, if any.
5981 *
5982 * @returns Strict VBox status code (i.e. informational status codes too).
5983 * @retval VINF_SUCCESS if the operand was successfully decoded.
5984 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5985 * operand.
5986 * @param pVCpu The cross context virtual CPU structure.
5987 * @param uExitInstrInfo The VM-exit instruction information field.
5988 * @param enmMemAccess The memory operand's access type (read or write).
5989 * @param GCPtrDisp The instruction displacement field, if any. For
5990 * RIP-relative addressing pass RIP + displacement here.
5991 * @param pGCPtrMem Where to store the effective destination memory address.
5992 *
5993 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5994 * virtual-8086 mode hence skips those checks while verifying if the
5995 * segment is valid.
5996 */
5997static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5998 PRTGCPTR pGCPtrMem)
5999{
6000 Assert(pGCPtrMem);
6001 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6002 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6003 | CPUMCTX_EXTRN_CR0);
6004
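    /* The address-size field of the VM-exit instruction information encodes 0 = 16-bit,
       1 = 32-bit and 2 = 64-bit; that value indexes both lookup tables below. */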
6005 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6006 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6007 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6008
6009 VMXEXITINSTRINFO ExitInstrInfo;
6010 ExitInstrInfo.u = uExitInstrInfo;
6011 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6012 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6013 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6014 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6015 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6016 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6017 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6018 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6019 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6020
6021 /*
6022 * Validate instruction information.
6023 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6024 */
6025 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6026 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6027 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6028 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6029 AssertLogRelMsgReturn(fIsMemOperand,
6030 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6031
6032 /*
6033 * Compute the complete effective address.
6034 *
6035 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6036 * See AMD spec. 4.5.2 "Segment Registers".
6037 */
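    /* That is: effective address = displacement + base register + (index register << scale),
       plus the segment base unless we are in long mode with a non-FS/GS segment, and finally
       truncated to the instruction's address size. */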
6038 RTGCPTR GCPtrMem = GCPtrDisp;
6039 if (fBaseRegValid)
6040 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6041 if (fIdxRegValid)
6042 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6043
6044 RTGCPTR const GCPtrOff = GCPtrMem;
6045 if ( !fIsLongMode
6046 || iSegReg >= X86_SREG_FS)
6047 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6048 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6049
6050 /*
6051 * Validate effective address.
6052 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6053 */
6054 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6055 Assert(cbAccess > 0);
6056 if (fIsLongMode)
6057 {
6058 if (X86_IS_CANONICAL(GCPtrMem))
6059 {
6060 *pGCPtrMem = GCPtrMem;
6061 return VINF_SUCCESS;
6062 }
6063
6064 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6065 * "Data Limit Checks in 64-bit Mode". */
6066 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6067 vmxHCSetPendingXcptGP(pVCpu, 0);
6068 return VINF_HM_PENDING_XCPT;
6069 }
6070
6071 /*
6072 * This is a watered down version of iemMemApplySegment().
6073 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6074 * and segment CPL/DPL checks are skipped.
6075 */
6076 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6077 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6078 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6079
6080 /* Check if the segment is present and usable. */
6081 if ( pSel->Attr.n.u1Present
6082 && !pSel->Attr.n.u1Unusable)
6083 {
6084 Assert(pSel->Attr.n.u1DescType);
6085 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6086 {
6087 /* Check permissions for the data segment. */
6088 if ( enmMemAccess == VMXMEMACCESS_WRITE
6089 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6090 {
6091 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6092 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6093 return VINF_HM_PENDING_XCPT;
6094 }
6095
6096 /* Check limits if it's a normal data segment. */
6097 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6098 {
6099 if ( GCPtrFirst32 > pSel->u32Limit
6100 || GCPtrLast32 > pSel->u32Limit)
6101 {
6102 Log4Func(("Data segment limit exceeded. "
6103 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6104 GCPtrLast32, pSel->u32Limit));
6105 if (iSegReg == X86_SREG_SS)
6106 vmxHCSetPendingXcptSS(pVCpu, 0);
6107 else
6108 vmxHCSetPendingXcptGP(pVCpu, 0);
6109 return VINF_HM_PENDING_XCPT;
6110 }
6111 }
6112 else
6113 {
6114 /* Check limits if it's an expand-down data segment.
6115 Note! The upper boundary is defined by the B bit, not the G bit! */
6116 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6117 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6118 {
6119 Log4Func(("Expand-down data segment limit exceeded. "
6120 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6121 GCPtrLast32, pSel->u32Limit));
6122 if (iSegReg == X86_SREG_SS)
6123 vmxHCSetPendingXcptSS(pVCpu, 0);
6124 else
6125 vmxHCSetPendingXcptGP(pVCpu, 0);
6126 return VINF_HM_PENDING_XCPT;
6127 }
6128 }
6129 }
6130 else
6131 {
6132 /* Check permissions for the code segment. */
6133 if ( enmMemAccess == VMXMEMACCESS_WRITE
6134 || ( enmMemAccess == VMXMEMACCESS_READ
6135 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6136 {
6137 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6138 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6139 vmxHCSetPendingXcptGP(pVCpu, 0);
6140 return VINF_HM_PENDING_XCPT;
6141 }
6142
6143 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6144 if ( GCPtrFirst32 > pSel->u32Limit
6145 || GCPtrLast32 > pSel->u32Limit)
6146 {
6147 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6148 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6149 if (iSegReg == X86_SREG_SS)
6150 vmxHCSetPendingXcptSS(pVCpu, 0);
6151 else
6152 vmxHCSetPendingXcptGP(pVCpu, 0);
6153 return VINF_HM_PENDING_XCPT;
6154 }
6155 }
6156 }
6157 else
6158 {
6159 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6160 vmxHCSetPendingXcptGP(pVCpu, 0);
6161 return VINF_HM_PENDING_XCPT;
6162 }
6163
6164 *pGCPtrMem = GCPtrMem;
6165 return VINF_SUCCESS;
6166}
6167#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6168
6169
6170/**
6171 * VM-exit helper for LMSW.
6172 */
6173static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6174{
6175 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6176 AssertRCReturn(rc, rc);
6177
6178 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6179 AssertMsg( rcStrict == VINF_SUCCESS
6180 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6181
6182 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6183 if (rcStrict == VINF_IEM_RAISED_XCPT)
6184 {
6185 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6186 rcStrict = VINF_SUCCESS;
6187 }
6188
6189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6190 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6191 return rcStrict;
6192}
6193
6194
6195/**
6196 * VM-exit helper for CLTS.
6197 */
6198static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6199{
6200 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6201 AssertRCReturn(rc, rc);
6202
6203 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6204 AssertMsg( rcStrict == VINF_SUCCESS
6205 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6206
6207 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6208 if (rcStrict == VINF_IEM_RAISED_XCPT)
6209 {
6210 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6211 rcStrict = VINF_SUCCESS;
6212 }
6213
6214 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6215 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6216 return rcStrict;
6217}
6218
6219
6220/**
6221 * VM-exit helper for MOV from CRx (CRx read).
6222 */
6223static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6224{
6225 Assert(iCrReg < 16);
6226 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6227
6228 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6229 AssertRCReturn(rc, rc);
6230
6231 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6232 AssertMsg( rcStrict == VINF_SUCCESS
6233 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6234
6235 if (iGReg == X86_GREG_xSP)
6236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6237 else
6238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6239#ifdef VBOX_WITH_STATISTICS
6240 switch (iCrReg)
6241 {
6242 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6243 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6244 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6245 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6246 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6247 }
6248#endif
6249 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6250 return rcStrict;
6251}
6252
6253
6254/**
6255 * VM-exit helper for MOV to CRx (CRx write).
6256 */
6257static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6258{
6259 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6260
6261 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6262 AssertMsg( rcStrict == VINF_SUCCESS
6263 || rcStrict == VINF_IEM_RAISED_XCPT
6264 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6265
6266 switch (iCrReg)
6267 {
6268 case 0:
6269 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6270 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6271 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6272 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6273 break;
6274
6275 case 2:
6276 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6277 /* Nothing to do here, CR2 is not part of the VMCS. */
6278 break;
6279
6280 case 3:
6281 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6282 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6283 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6284 break;
6285
6286 case 4:
6287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6288 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6289#ifndef IN_NEM_DARWIN
6290 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6291 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6292#else
6293 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6294#endif
6295 break;
6296
6297 case 8:
6298 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6299 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6300 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6301 break;
6302
6303 default:
6304 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6305 break;
6306 }
6307
6308 if (rcStrict == VINF_IEM_RAISED_XCPT)
6309 {
6310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6311 rcStrict = VINF_SUCCESS;
6312 }
6313 return rcStrict;
6314}
6315
6316
6317/**
6318 * VM-exit exception handler for \#PF (Page-fault exception).
6319 *
6320 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6321 */
6322static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6323{
6324 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6325 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6326
6327#ifndef IN_NEM_DARWIN
6328 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6329 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6330 { /* likely */ }
6331 else
6332#endif
6333 {
6334#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6335 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6336#endif
6337 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6338 if (!pVmxTransient->fVectoringDoublePF)
6339 {
6340 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6341 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6342 }
6343 else
6344 {
6345 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6346 Assert(!pVmxTransient->fIsNestedGuest);
6347 vmxHCSetPendingXcptDF(pVCpu);
6348 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6349 }
6350 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6351 return VINF_SUCCESS;
6352 }
6353
6354 Assert(!pVmxTransient->fIsNestedGuest);
6355
6356 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6357 of differentiating between instruction emulation and event injection that caused the #PF. See @bugref{6607}. */
6358 if (pVmxTransient->fVectoringPF)
6359 {
6360 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6361 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6362 }
6363
6364 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6365 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6366 AssertRCReturn(rc, rc);
6367
6368 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6369 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6370
6371 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6372 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6373
6374 Log4Func(("#PF: rc=%Rrc\n", rc));
6375 if (rc == VINF_SUCCESS)
6376 {
6377 /*
6378 * This is typically a shadow page table sync or an MMIO instruction. But we may have
6379 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6380 */
6381 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6382 TRPMResetTrap(pVCpu);
6383 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6384 return rc;
6385 }
6386
6387 if (rc == VINF_EM_RAW_GUEST_TRAP)
6388 {
6389 if (!pVmxTransient->fVectoringDoublePF)
6390 {
6391 /* It's a guest page fault and needs to be reflected to the guest. */
6392 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6393 TRPMResetTrap(pVCpu);
6394 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6395 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6396 uGstErrorCode, pVmxTransient->uExitQual);
6397 }
6398 else
6399 {
6400 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6401 TRPMResetTrap(pVCpu);
6402 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6403 vmxHCSetPendingXcptDF(pVCpu);
6404 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6405 }
6406
6407 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6408 return VINF_SUCCESS;
6409 }
6410
6411 TRPMResetTrap(pVCpu);
6412 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6413 return rc;
6414}
6415
6416
6417/**
6418 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6419 *
6420 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6421 */
6422static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6423{
6424 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6425 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6426
6427 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6428 AssertRCReturn(rc, rc);
6429
6430 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6431 {
6432 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6433 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6434
6435 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6436 * provides VM-exit instruction length. If this causes problems later,
6437 * disassemble the instruction like it's done on AMD-V. */
6438 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6439 AssertRCReturn(rc2, rc2);
6440 return rc;
6441 }
6442
6443 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6444 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6445 return VINF_SUCCESS;
6446}
6447
6448
6449/**
6450 * VM-exit exception handler for \#BP (Breakpoint exception).
6451 *
6452 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6453 */
6454static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6455{
6456 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6457 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6458
6459 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6460 AssertRCReturn(rc, rc);
6461
6462 VBOXSTRICTRC rcStrict;
6463 if (!pVmxTransient->fIsNestedGuest)
6464 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6465 else
6466 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6467
6468 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6469 {
6470 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6471 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6472 rcStrict = VINF_SUCCESS;
6473 }
6474
6475 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6476 return rcStrict;
6477}
6478
6479
6480/**
6481 * VM-exit exception handler for \#AC (Alignment-check exception).
6482 *
6483 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6484 */
6485static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6486{
6487 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6488
6489 /*
6490 * Detect #ACs caused by the host having enabled split-lock detection.
6491 * Emulate such instructions.
6492 */
6493 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6494 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6495 AssertRCReturn(rc, rc);
6496 /** @todo detect split lock in cpu feature? */
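    /* A genuine 486-style alignment-check #AC requires CR0.AM=1, EFLAGS.AC=1 and CPL=3; if any of
       the conditions below shows otherwise, the #AC must stem from host split-lock detection. */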
6497 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6498 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6499 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6500 || CPUMGetGuestCPL(pVCpu) != 3
6501 /* 3. When EFLAGS.AC is zero this can only be a split-lock case. */
6502 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6503 {
6504 /*
6505 * Check for debug/trace events and import state accordingly.
6506 */
6507 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6508 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6509 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6510#ifndef IN_NEM_DARWIN
6511 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6512#endif
6513 )
6514 {
6515 if (pVM->cCpus == 1)
6516 {
6517#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6518 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6519#else
6520 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6521#endif
6522 AssertRCReturn(rc, rc);
6523 }
6524 }
6525 else
6526 {
6527 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6528 AssertRCReturn(rc, rc);
6529
6530 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6531
6532 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6533 {
6534 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6535 if (rcStrict != VINF_SUCCESS)
6536 return rcStrict;
6537 }
6538 }
6539
6540 /*
6541 * Emulate the instruction.
6542 *
6543 * We have to ignore the LOCK prefix here as we must not retrigger the
6544 * detection on the host. This isn't all that satisfactory, though...
6545 */
6546 if (pVM->cCpus == 1)
6547 {
6548 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6549 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6550
6551 /** @todo For SMP configs we should do a rendezvous here. */
6552 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6553 if (rcStrict == VINF_SUCCESS)
6554#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6556 HM_CHANGED_GUEST_RIP
6557 | HM_CHANGED_GUEST_RFLAGS
6558 | HM_CHANGED_GUEST_GPRS_MASK
6559 | HM_CHANGED_GUEST_CS
6560 | HM_CHANGED_GUEST_SS);
6561#else
6562 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6563#endif
6564 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6565 {
6566 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6567 rcStrict = VINF_SUCCESS;
6568 }
6569 return rcStrict;
6570 }
6571 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6572 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6573 return VINF_EM_EMULATE_SPLIT_LOCK;
6574 }
6575
6576 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6577 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6578 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6579
6580 /* Re-inject it. We'll detect any nesting before getting here. */
6581 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6582 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6583 return VINF_SUCCESS;
6584}
6585
6586
6587/**
6588 * VM-exit exception handler for \#DB (Debug exception).
6589 *
6590 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6591 */
6592static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6593{
6594 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6595 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6596
6597 /*
6598 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6599 */
6600 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6601
6602 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6603 uint64_t const uDR6 = X86_DR6_INIT_VAL
6604 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6605 | X86_DR6_BD | X86_DR6_BS));
6606
6607 int rc;
6608 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6609 if (!pVmxTransient->fIsNestedGuest)
6610 {
6611 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6612
6613 /*
6614 * Prevents stepping twice over the same instruction when the guest is stepping using
6615 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6616 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6617 */
6618 if ( rc == VINF_EM_DBG_STEPPED
6619 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6620 {
6621 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6622 rc = VINF_EM_RAW_GUEST_TRAP;
6623 }
6624 }
6625 else
6626 rc = VINF_EM_RAW_GUEST_TRAP;
6627 Log6Func(("rc=%Rrc\n", rc));
6628 if (rc == VINF_EM_RAW_GUEST_TRAP)
6629 {
6630 /*
6631 * The exception was for the guest. Update DR6, DR7.GD and
6632 * IA32_DEBUGCTL.LBR before forwarding it.
6633 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6634 */
6635#ifndef IN_NEM_DARWIN
6636 VMMRZCallRing3Disable(pVCpu);
6637 HM_DISABLE_PREEMPT(pVCpu);
6638
6639 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6640 pCtx->dr[6] |= uDR6;
6641 if (CPUMIsGuestDebugStateActive(pVCpu))
6642 ASMSetDR6(pCtx->dr[6]);
6643
6644 HM_RESTORE_PREEMPT();
6645 VMMRZCallRing3Enable(pVCpu);
6646#else
6647 /** @todo */
6648#endif
6649
6650 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6651 AssertRCReturn(rc, rc);
6652
6653 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6654 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6655
6656 /* Paranoia. */
6657 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6658 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6659
6660 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6661 AssertRC(rc);
6662
6663 /*
6664 * Raise #DB in the guest.
6665 *
6666 * It is important to reflect exactly what the VM-exit gave us (preserving the
6667 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6668 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6669 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6670 *
6671 * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as part of
6672 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6673 */
6674 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6675 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6676 return VINF_SUCCESS;
6677 }
6678
6679 /*
6680 * Not a guest trap, so it must be a hypervisor-related debug event.
6681 * Update DR6 in case someone is interested in it.
6682 */
6683 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6684 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6685 CPUMSetHyperDR6(pVCpu, uDR6);
6686
6687 return rc;
6688}
6689
6690
6691/**
6692 * Hacks its way around the lovely mesa driver's backdoor accesses.
6693 *
6694 * @sa hmR0SvmHandleMesaDrvGp.
6695 */
6696static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6697{
6698 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6699 RT_NOREF(pCtx);
6700
6701 /* For now we'll just skip the instruction. */
6702 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6703}
6704
6705
6706/**
6707 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6708 * backdoor logging w/o checking what it is running inside.
6709 *
6710 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6711 * backdoor port and magic numbers loaded in registers.
6712 *
6713 * @returns true if it is, false if it isn't.
6714 * @sa hmR0SvmIsMesaDrvGp.
6715 */
6716DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6717{
6718 /* 0xed: IN eAX,dx */
6719 uint8_t abInstr[1];
6720 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6721 return false;
6722
6723 /* Check that it is #GP(0). */
6724 if (pVmxTransient->uExitIntErrorCode != 0)
6725 return false;
6726
6727 /* Check magic and port. */
6728 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6729 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6730 if (pCtx->rax != UINT32_C(0x564d5868)) /* 'VMXh' - the VMware hypervisor magic value. */
6731 return false;
6732 if (pCtx->dx != UINT32_C(0x5658)) /* 'VX' - the VMware hypervisor backdoor I/O port. */
6733 return false;
6734
6735 /* Flat ring-3 CS. */
6736 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6737 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6738 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6739 if (pCtx->cs.Attr.n.u2Dpl != 3)
6740 return false;
6741 if (pCtx->cs.u64Base != 0)
6742 return false;
6743
6744 /* Check opcode. */
6745 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6746 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6747 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6748 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6749 if (RT_FAILURE(rc))
6750 return false;
6751 if (abInstr[0] != 0xed)
6752 return false;
6753
6754 return true;
6755}
6756
6757
6758/**
6759 * VM-exit exception handler for \#GP (General-protection exception).
6760 *
6761 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6762 */
6763static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6764{
6765 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6766 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6767
6768 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6769 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6770#ifndef IN_NEM_DARWIN
6771 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6772 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6773 { /* likely */ }
6774 else
6775#endif
6776 {
6777#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6778# ifndef IN_NEM_DARWIN
6779 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6780# else
6781 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6782# endif
6783#endif
6784 /*
6785 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6786 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6787 */
6788 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6789 AssertRCReturn(rc, rc);
6790 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6791 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6792
6793 if ( pVmxTransient->fIsNestedGuest
6794 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6795 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6796 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6797 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6798 else
6799 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6800 return rc;
6801 }
6802
6803#ifndef IN_NEM_DARWIN
6804 Assert(CPUMIsGuestInRealModeEx(pCtx));
6805 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6806 Assert(!pVmxTransient->fIsNestedGuest);
6807
6808 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6809 AssertRCReturn(rc, rc);
6810
6811 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6812 if (rcStrict == VINF_SUCCESS)
6813 {
6814 if (!CPUMIsGuestInRealModeEx(pCtx))
6815 {
6816 /*
6817 * The guest is no longer in real-mode, check if we can continue executing the
6818 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6819 */
6820 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6821 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6822 {
6823 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6824 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6825 }
6826 else
6827 {
6828 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6829 rcStrict = VINF_EM_RESCHEDULE;
6830 }
6831 }
6832 else
6833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6834 }
6835 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6836 {
6837 rcStrict = VINF_SUCCESS;
6838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6839 }
6840 return VBOXSTRICTRC_VAL(rcStrict);
6841#endif
6842}
6843
6844
6845/**
6846 * VM-exit exception handler for \#DE (Divide Error).
6847 *
6848 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6849 */
6850static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6851{
6852 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6853 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6854
6855 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6856 AssertRCReturn(rc, rc);
6857
6858 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
6859 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6860 {
6861 uint8_t cbInstr = 0;
6862 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6863 if (rc2 == VINF_SUCCESS)
6864 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6865 else if (rc2 == VERR_NOT_FOUND)
6866 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6867 else
6868 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6869 }
6870 else
6871 rcStrict = VINF_SUCCESS; /* Do nothing. */
6872
6873 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6874 if (RT_FAILURE(rcStrict))
6875 {
6876 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6877 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6878 rcStrict = VINF_SUCCESS;
6879 }
6880
6881 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6882 return VBOXSTRICTRC_VAL(rcStrict);
6883}
6884
6885
6886/**
6887 * VM-exit exception handler wrapper for all other exceptions that are not handled
6888 * by a specific handler.
6889 *
6890 * This simply re-injects the exception back into the VM without any special
6891 * processing.
6892 *
6893 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6894 */
6895static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6896{
6897 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6898
6899#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6900# ifndef IN_NEM_DARWIN
6901 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6902 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6903 ("uVector=%#x u32XcptBitmap=%#X32\n",
6904 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6905 NOREF(pVmcsInfo);
6906# endif
6907#endif
6908
6909 /*
6910 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6911 * would have been handled while checking exits due to event delivery.
6912 */
6913 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6914
6915#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6916 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6917 AssertRCReturn(rc, rc);
6918 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6919#endif
6920
6921#ifdef VBOX_WITH_STATISTICS
6922 switch (uVector)
6923 {
6924 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6925 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6926 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6927 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6928 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6929 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6930 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6931 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6932 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6933 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6934 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6935 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6936 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6937 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6938 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6939 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6940 default:
6941 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6942 break;
6943 }
6944#endif
6945
6946 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6947 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6948 NOREF(uVector);
6949
6950 /* Re-inject the original exception into the guest. */
6951 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6952 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6953 return VINF_SUCCESS;
6954}
6955
6956
6957/**
6958 * VM-exit exception handler for all exceptions (except NMIs!).
6959 *
6960 * @remarks This may be called for both guests and nested-guests. Take care to not
6961 * make assumptions and avoid doing anything that is not relevant when
6962 * executing a nested-guest (e.g., Mesa driver hacks).
6963 */
6964static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6965{
6966 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6967
6968 /*
6969 * If this VM-exit occurred while delivering an event through the guest IDT, take
6970 * action based on the return code and additional hints (e.g. for page-faults)
6971 * that will be updated in the VMX transient structure.
6972 */
6973 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6974 if (rcStrict == VINF_SUCCESS)
6975 {
6976 /*
6977 * If an exception caused a VM-exit due to delivery of an event, the original
6978 * event may have to be re-injected into the guest. We shall reinject it and
6979 * continue guest execution. However, page-fault is a complicated case and
6980 * needs additional processing done in vmxHCExitXcptPF().
6981 */
6982 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6983 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6984 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6985 || uVector == X86_XCPT_PF)
6986 {
6987 switch (uVector)
6988 {
6989 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6990 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6991 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6992 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6993 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6994 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6995 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
6996 default:
6997 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6998 }
6999 }
7000 /* else: inject pending event before resuming guest execution. */
7001 }
7002 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7003 {
7004 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7005 rcStrict = VINF_SUCCESS;
7006 }
7007
7008 return rcStrict;
7009}
7010/** @} */
7011
7012
7013/** @name VM-exit handlers.
7014 * @{
7015 */
7016/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7017/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7018/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7019
7020/**
7021 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7022 */
7023HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7024{
7025 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7026 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7027
7028#ifndef IN_NEM_DARWIN
7029 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7030 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7031 return VINF_SUCCESS;
7032 return VINF_EM_RAW_INTERRUPT;
7033#else
7034 return VINF_SUCCESS;
7035#endif
7036}
7037
7038
7039/**
7040 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7041 * VM-exit.
7042 */
7043HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7044{
7045 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7046 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7047
7048 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7049
7050 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7051 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7052 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7053
7054 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7055 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7056 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7057 NOREF(pVmcsInfo);
7058
7059 VBOXSTRICTRC rcStrict;
7060 switch (uExitIntType)
7061 {
7062#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7063 /*
7064 * Host physical NMIs:
7065 * This cannot be a guest NMI: the only way for the guest to receive an NMI is if we
7066 * injected it ourselves, and anything we inject will not cause a VM-exit directly
7067 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7068 *
7069 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7070 * See Intel spec. 27.5.5 "Updating Non-Register State".
7071 */
7072 case VMX_EXIT_INT_INFO_TYPE_NMI:
7073 {
7074 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7075 break;
7076 }
7077#endif
7078
7079 /*
7080 * Privileged software exceptions (#DB from ICEBP),
7081 * Software exceptions (#BP and #OF),
7082 * Hardware exceptions:
7083 * Process the required exceptions and resume guest execution if possible.
7084 */
7085 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7086 Assert(uVector == X86_XCPT_DB);
7087 RT_FALL_THRU();
7088 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7089 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7090 RT_FALL_THRU();
7091 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7092 {
7093 NOREF(uVector);
7094 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7095 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7096 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7097 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7098
7099 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7100 break;
7101 }
7102
7103 default:
7104 {
7105 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7106 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7107 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7108 break;
7109 }
7110 }
7111
7112 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7113 return rcStrict;
7114}
7115
7116
7117/**
7118 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7119 */
7120HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7121{
7122 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7123
7124 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7125 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7126 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7127
7128 /* Evaluate and deliver pending events and resume guest execution. */
7129 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7130 return VINF_SUCCESS;
7131}
7132
7133
7134/**
7135 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7136 */
7137HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7138{
7139 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7140
7141 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7142 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7143 {
7144 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7145 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7146 }
7147
7148 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7149
7150 /*
7151 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7152 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7153 */
7154 uint32_t fIntrState;
7155 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7156 AssertRC(rc);
7157 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7158 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7159 {
7160 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7161 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7162
7163 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7164 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7165 AssertRC(rc);
7166 }
7167
7168 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7169 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7170
7171 /* Evaluate and deliver pending events and resume guest execution. */
7172 return VINF_SUCCESS;
7173}
7174
7175
7176/**
7177 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7178 */
7179HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7180{
7181 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
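 /* Nothing to emulate here; just skip the instruction and resume guest execution. */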
7182 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7183}
7184
7185
7186/**
7187 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7188 */
7189HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7190{
7191 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7192 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7193}
7194
7195
7196/**
7197 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7198 */
7199HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7200{
7201 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7202
7203 /*
7204 * Get the state we need and update the exit history entry.
7205 */
7206 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7207 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7208
7209 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7210 AssertRCReturn(rc, rc);
7211
7212 VBOXSTRICTRC rcStrict;
7213 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7214 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7215 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7216 if (!pExitRec)
7217 {
7218 /*
7219 * Regular CPUID instruction execution.
7220 */
7221 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7222 if (rcStrict == VINF_SUCCESS)
7223 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7224 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7225 {
7226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7227 rcStrict = VINF_SUCCESS;
7228 }
7229 }
7230 else
7231 {
7232 /*
7233 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7234 */
7235 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7236 AssertRCReturn(rc2, rc2);
7237
7238 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7239 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7240
7241 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7243
7244 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7245 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7246 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7247 }
7248 return rcStrict;
7249}
7250
7251
7252/**
7253 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7254 */
7255HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7256{
7257 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7258
7259 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7260 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7261 AssertRCReturn(rc, rc);
7262
7263 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7264 return VINF_EM_RAW_EMULATE_INSTR;
7265
7266 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7267 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7268}
7269
7270
7271/**
7272 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7273 */
7274HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7275{
7276 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7277
7278 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7279 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7280 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7281 AssertRCReturn(rc, rc);
7282
7283 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7284 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7285 {
7286 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7287 we must reset offsetting on VM-entry. See @bugref{6634}. */
7288 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7289 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7290 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7291 }
7292 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7293 {
7294 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7295 rcStrict = VINF_SUCCESS;
7296 }
7297 return rcStrict;
7298}
7299
7300
7301/**
7302 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7303 */
7304HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7305{
7306 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7307
7308 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7309 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7310 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7311 AssertRCReturn(rc, rc);
7312
7313 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7314 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7315 {
7316 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7317 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7318 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7319 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7320 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7321 }
7322 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7323 {
7324 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7325 rcStrict = VINF_SUCCESS;
7326 }
7327 return rcStrict;
7328}
7329
7330
7331/**
7332 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7333 */
7334HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7335{
7336 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7337
7338 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7339 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7340 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7341 AssertRCReturn(rc, rc);
7342
7343 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7344 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7345 if (RT_LIKELY(rc == VINF_SUCCESS))
7346 {
7347 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
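 /* RDPMC is a two-byte instruction (0F 33), hence the exit instruction length is expected to be 2. */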
7348 Assert(pVmxTransient->cbExitInstr == 2);
7349 }
7350 else
7351 {
7352 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7353 rc = VERR_EM_INTERPRETER;
7354 }
7355 return rc;
7356}
7357
7358
7359/**
7360 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7361 */
7362HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7363{
7364 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7365
7366 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7367 if (EMAreHypercallInstructionsEnabled(pVCpu))
7368 {
7369 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7370 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7371 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7372 AssertRCReturn(rc, rc);
7373
7374 /* Perform the hypercall. */
7375 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7376 if (rcStrict == VINF_SUCCESS)
7377 {
7378 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7379 AssertRCReturn(rc, rc);
7380 }
7381 else
7382 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7383 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7384 || RT_FAILURE(rcStrict));
7385
7386 /* If the hypercall changes anything other than guest's general-purpose registers,
7387 we would need to reload the guest changed bits here before VM-entry. */
7388 }
7389 else
7390 Log4Func(("Hypercalls not enabled\n"));
7391
7392 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7393 if (RT_FAILURE(rcStrict))
7394 {
7395 vmxHCSetPendingXcptUD(pVCpu);
7396 rcStrict = VINF_SUCCESS;
7397 }
7398
7399 return rcStrict;
7400}
7401
7402
7403/**
7404 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7405 */
7406HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7407{
7408 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7409#ifndef IN_NEM_DARWIN
7410 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7411#endif
7412
7413 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7414 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7415 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7416 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7417 AssertRCReturn(rc, rc);
7418
7419 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7420
7421 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7422 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7423 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7424 {
7425 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7426 rcStrict = VINF_SUCCESS;
7427 }
7428 else
7429 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7430 VBOXSTRICTRC_VAL(rcStrict)));
7431 return rcStrict;
7432}
7433
7434
7435/**
7436 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7437 */
7438HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7439{
7440 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7441
7442 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7443 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7444 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7445 AssertRCReturn(rc, rc);
7446
7447 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7448 if (rcStrict == VINF_SUCCESS)
7449 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7450 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7451 {
7452 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7453 rcStrict = VINF_SUCCESS;
7454 }
7455
7456 return rcStrict;
7457}
7458
7459
7460/**
7461 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7462 */
7463HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7464{
7465 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7466
7467 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7468 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7469 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7470 AssertRCReturn(rc, rc);
7471
7472 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7473 if (RT_SUCCESS(rcStrict))
7474 {
7475 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7476 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7477 rcStrict = VINF_SUCCESS;
7478 }
7479
7480 return rcStrict;
7481}
7482
7483
7484/**
7485 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7486 * VM-exit.
7487 */
7488HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7489{
7490 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7491 return VINF_EM_RESET;
7492}
7493
7494
7495/**
7496 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7497 */
7498HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7499{
7500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7501
7502 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7503 AssertRCReturn(rc, rc);
7504
7505 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7506 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7507 rc = VINF_SUCCESS;
7508 else
7509 rc = VINF_EM_HALT;
7510
7511 if (rc != VINF_SUCCESS)
7512 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7513 return rc;
7514}
7515
7516
7517/**
7518 * VM-exit handler for instructions that result in a \#UD exception delivered to
7519 * the guest.
7520 */
7521HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7522{
7523 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7524 vmxHCSetPendingXcptUD(pVCpu);
7525 return VINF_SUCCESS;
7526}
7527
7528
7529/**
7530 * VM-exit handler for expiry of the VMX-preemption timer.
7531 */
7532HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7533{
7534 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7535
7536 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7537 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7538 Log12(("vmxHCExitPreemptTimer:\n"));
7539
7540 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7541 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7542 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7543 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7544 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7545}
7546
7547
7548/**
7549 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7550 */
7551HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7552{
7553 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7554
7555 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7556 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7557 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7558 AssertRCReturn(rc, rc);
7559
7560 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7561 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7562 : HM_CHANGED_RAISED_XCPT_MASK);
7563
7564#ifndef IN_NEM_DARWIN
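 /* The guest has just changed XCR0 (and possibly CR4.OSXSAVE); re-evaluate whether we need
    to swap XCR0 around VM-entry/exit and refresh the VM-entry function if that changed. */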
7565 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7566 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7567 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7568 {
7569 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7570 hmR0VmxUpdateStartVmFunction(pVCpu);
7571 }
7572#endif
7573
7574 return rcStrict;
7575}
7576
7577
7578/**
7579 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7580 */
7581HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7582{
7583 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7584
7585 /** @todo Enable the new code after finding a reliable guest test-case. */
7586#if 1
7587 return VERR_EM_INTERPRETER;
7588#else
7589 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7590 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7591 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7592 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7593 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7594 AssertRCReturn(rc, rc);
7595
7596 /* Paranoia. Ensure this has a memory operand. */
7597 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7598
7599 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7600 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7601 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7602 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7603
7604 RTGCPTR GCPtrDesc;
7605 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7606
7607 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7608 GCPtrDesc, uType);
7609 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7610 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7611 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7612 {
7613 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7614 rcStrict = VINF_SUCCESS;
7615 }
7616 return rcStrict;
7617#endif
7618}
7619
7620
7621/**
7622 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7623 * VM-exit.
7624 */
7625HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7626{
7627 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7628 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7629 AssertRCReturn(rc, rc);
7630
7631 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7632 if (RT_FAILURE(rc))
7633 return rc;
7634
7635 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7636 NOREF(uInvalidReason);
7637
7638#ifdef VBOX_STRICT
7639 uint32_t fIntrState;
7640 uint64_t u64Val;
7641 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7642 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7643 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7644
7645 Log4(("uInvalidReason %u\n", uInvalidReason));
7646 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7647 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7648 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7649
7650 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7651 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7652 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7653 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7654 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7655 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7656 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7657 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7658 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7659 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7660 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7661 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7662# ifndef IN_NEM_DARWIN
7663 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7664 {
7665 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7666 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7667 }
7668
7669 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7670# endif
7671#endif
7672
7673 return VERR_VMX_INVALID_GUEST_STATE;
7674}
7675
7676/**
7677 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7678 */
7679HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7680{
7681 /*
7682 * Cumulative notes of all recognized but unexpected VM-exits.
7683 *
7684 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7685 * nested-paging is used.
7686 *
7687 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7688 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7689 * this function (and thereby stopping VM execution) for handling such instructions.
7690 *
7691 *
7692 * VMX_EXIT_INIT_SIGNAL:
7693 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7694 * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
7695 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7696 *
7697 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7698 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7699 * See Intel spec. "23.8 Restrictions on VMX operation".
7700 *
7701 * VMX_EXIT_SIPI:
7702 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7703 * activity state is used. We don't make use of it as our guests don't have direct
7704 * access to the host local APIC.
7705 *
7706 * See Intel spec. 25.3 "Other Causes of VM-exits".
7707 *
7708 * VMX_EXIT_IO_SMI:
7709 * VMX_EXIT_SMI:
7710 * This can only happen if we support dual-monitor treatment of SMI, which can be
7711 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7712 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7713 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7714 *
7715 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7716 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7717 *
7718 * VMX_EXIT_ERR_MSR_LOAD:
7719 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7720 * and typically indicate a bug in the hypervisor code. We thus cannot resume guest
7721 * execution.
7722 *
7723 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7724 *
7725 * VMX_EXIT_ERR_MACHINE_CHECK:
7726 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7727 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7728 * abort-class #MC exception is raised. We thus cannot assume a
7729 * reasonable chance of continuing any sort of execution and we bail.
7730 *
7731 * See Intel spec. 15.1 "Machine-check Architecture".
7732 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7733 *
7734 * VMX_EXIT_PML_FULL:
7735 * VMX_EXIT_VIRTUALIZED_EOI:
7736 * VMX_EXIT_APIC_WRITE:
7737 * We do not currently support any of these features and thus they are all unexpected
7738 * VM-exits.
7739 *
7740 * VMX_EXIT_GDTR_IDTR_ACCESS:
7741 * VMX_EXIT_LDTR_TR_ACCESS:
7742 * VMX_EXIT_RDRAND:
7743 * VMX_EXIT_RSM:
7744 * VMX_EXIT_VMFUNC:
7745 * VMX_EXIT_ENCLS:
7746 * VMX_EXIT_RDSEED:
7747 * VMX_EXIT_XSAVES:
7748 * VMX_EXIT_XRSTORS:
7749 * VMX_EXIT_UMWAIT:
7750 * VMX_EXIT_TPAUSE:
7751 * VMX_EXIT_LOADIWKEY:
7752 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7753 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7754 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7755 *
7756 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7757 */
7758 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7759 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7760 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7761}
7762
7763
7764/**
7765 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7766 */
7767HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7768{
7769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7770
7771 /** @todo Optimize this: We currently drag in the whole MSR state
7772 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7773 * MSRs required. That would require changes to IEM and possibly CPUM too.
7774 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7775 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7776 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7777 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7778 switch (idMsr)
7779 {
7780 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7781 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7782 }
7783
7784 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7785 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7786 AssertRCReturn(rc, rc);
7787
7788 Log4Func(("ecx=%#RX32\n", idMsr));
7789
7790#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7791 Assert(!pVmxTransient->fIsNestedGuest);
7792 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7793 {
7794 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7795 && idMsr != MSR_K6_EFER)
7796 {
7797 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7798 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7799 }
7800 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7801 {
7802 Assert(pVmcsInfo->pvMsrBitmap);
7803 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7804 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7805 {
7806 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7807 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7808 }
7809 }
7810 }
7811#endif
7812
7813 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7814 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7815 if (rcStrict == VINF_SUCCESS)
7816 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7817 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7818 {
7819 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7820 rcStrict = VINF_SUCCESS;
7821 }
7822 else
7823 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7824 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7825
7826 return rcStrict;
7827}
7828
7829
7830/**
7831 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7832 */
7833HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7834{
7835 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7836
7837 /** @todo Optimize this: We currently drag in the whole MSR state
7838 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7839 * MSRs required. That would require changes to IEM and possibly CPUM too.
7840 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7841 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7842 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7843
7844 /*
7845 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7846 * Although we don't need to fetch the base (it will be overwritten shortly), while
7847 * loading guest-state we would also load the entire segment register, including the
7848 * limit and attributes, and thus we need to import them here.
7849 */
7850 switch (idMsr)
7851 {
7852 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7853 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7854 }
7855
7856 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7857 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7858 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7859 AssertRCReturn(rc, rc);
7860
7861 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7862
7863 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7864 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7865
7866 if (rcStrict == VINF_SUCCESS)
7867 {
7868 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7869
7870 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7871 if ( idMsr == MSR_IA32_APICBASE
7872 || ( idMsr >= MSR_IA32_X2APIC_START
7873 && idMsr <= MSR_IA32_X2APIC_END))
7874 {
7875 /*
7876 * We've already saved the APIC-related guest state (TPR) in the post-run phase.
7877 * When full APIC-register virtualization is implemented we'll have to make
7878 * sure the APIC state is saved from the VMCS before IEM changes it.
7879 */
7880 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7881 }
7882 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7883 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7884 else if (idMsr == MSR_K6_EFER)
7885 {
7886 /*
7887 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7888 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7889 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7890 */
7891 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7892 }
7893
7894 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7895 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7896 {
7897 switch (idMsr)
7898 {
7899 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7900 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7901 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7902 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7903 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7904 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7905 default:
7906 {
7907#ifndef IN_NEM_DARWIN
7908 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7909 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7910 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7911 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7912#else
7913 AssertMsgFailed(("TODO\n"));
7914#endif
7915 break;
7916 }
7917 }
7918 }
7919#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7920 else
7921 {
7922 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7923 switch (idMsr)
7924 {
7925 case MSR_IA32_SYSENTER_CS:
7926 case MSR_IA32_SYSENTER_EIP:
7927 case MSR_IA32_SYSENTER_ESP:
7928 case MSR_K8_FS_BASE:
7929 case MSR_K8_GS_BASE:
7930 {
7931 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7932 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7933 }
7934
7935 /* Writes to MSRs in the auto-load/store area or to swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
7936 default:
7937 {
7938 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7939 {
7940 /* EFER MSR writes are always intercepted. */
7941 if (idMsr != MSR_K6_EFER)
7942 {
7943 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7944 idMsr));
7945 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7946 }
7947 }
7948
7949 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7950 {
7951 Assert(pVmcsInfo->pvMsrBitmap);
7952 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7953 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7954 {
7955 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7956 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7957 }
7958 }
7959 break;
7960 }
7961 }
7962 }
7963#endif /* VBOX_STRICT */
7964 }
7965 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7966 {
7967 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7968 rcStrict = VINF_SUCCESS;
7969 }
7970 else
7971 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7972 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7973
7974 return rcStrict;
7975}
7976
7977
7978/**
7979 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7980 */
7981HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7982{
7983 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7984
7985 /** @todo The guest has likely hit a contended spinlock. We might want to
7986 * poke or schedule a different guest VCPU. */
7987 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7988 if (RT_SUCCESS(rc))
7989 return VINF_EM_RAW_INTERRUPT;
7990
7991 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7992 return rc;
7993}
7994
7995
7996/**
7997 * VM-exit handler for when the TPR value is lowered below the specified
7998 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7999 */
8000HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8001{
8002 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8003 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8004
8005 /*
8006 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8007 * We'll re-evaluate pending interrupts and inject them before the next VM
8008 * entry so we can just continue execution here.
8009 */
8010 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8011 return VINF_SUCCESS;
8012}
8013
8014
8015/**
8016 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8017 * VM-exit.
8018 *
8019 * @retval VINF_SUCCESS when guest execution can continue.
8020 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8021 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8022 * incompatible guest state for VMX execution (real-on-v86 case).
8023 */
8024HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8025{
8026 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8027 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8028
8029 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8030 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8031 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8032
8033 VBOXSTRICTRC rcStrict;
8034 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8035 uint64_t const uExitQual = pVmxTransient->uExitQual;
8036 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8037 switch (uAccessType)
8038 {
8039 /*
8040 * MOV to CRx.
8041 */
8042 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8043 {
8044 /*
8045 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8046 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8047 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8048 * PAE PDPTEs as well.
8049 */
8050 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8051 AssertRCReturn(rc, rc);
8052
8053 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8054#ifndef IN_NEM_DARWIN
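 /* Remember CR0 before the write so the real-mode switch kludge below can detect a cleared PE bit. */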
8055 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8056#endif
8057 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8058 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8059
8060 /*
8061 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8062 * - When nested paging isn't used.
8063 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8064 * - We are executing in the VM debug loop.
8065 */
8066#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8067# ifndef IN_NEM_DARWIN
8068 Assert( iCrReg != 3
8069 || !VM_IS_VMX_NESTED_PAGING(pVM)
8070 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8071 || pVCpu->hmr0.s.fUsingDebugLoop);
8072# else
8073 Assert( iCrReg != 3
8074 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8075# endif
8076#endif
8077
8078 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8079 Assert( iCrReg != 8
8080 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8081
8082 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8083 AssertMsg( rcStrict == VINF_SUCCESS
8084 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8085
8086#ifndef IN_NEM_DARWIN
8087 /*
8088 * This is a kludge for handling switches back to real mode when we try to use
8089 * V86 mode to run real mode code directly. The problem is that V86 mode cannot
8090 * deal with special selector values, so we have to return to ring-3 and run
8091 * there until the selector values are V86-mode compatible.
8092 *
8093 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8094 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8095 * this function.
8096 */
8097 if ( iCrReg == 0
8098 && rcStrict == VINF_SUCCESS
8099 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8100 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8101 && (uOldCr0 & X86_CR0_PE)
8102 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8103 {
8104 /** @todo Check selectors rather than returning all the time. */
8105 Assert(!pVmxTransient->fIsNestedGuest);
8106 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8107 rcStrict = VINF_EM_RESCHEDULE_REM;
8108 }
8109#endif
8110
8111 break;
8112 }
8113
8114 /*
8115 * MOV from CRx.
8116 */
8117 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8118 {
8119 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8120 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8121
8122 /*
8123 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8124 * - When nested paging isn't used.
8125 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8126 * - We are executing in the VM debug loop.
8127 */
8128#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8129# ifndef IN_NEM_DARWIN
8130 Assert( iCrReg != 3
8131 || !VM_IS_VMX_NESTED_PAGING(pVM)
8132 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8133 || pVCpu->hmr0.s.fLeaveDone);
8134# else
8135 Assert( iCrReg != 3
8136 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8137# endif
8138#endif
8139
8140 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8141 Assert( iCrReg != 8
8142 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8143
8144 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8145 break;
8146 }
8147
8148 /*
8149 * CLTS (Clear Task-Switch Flag in CR0).
8150 */
8151 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8152 {
8153 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8154 break;
8155 }
8156
8157 /*
8158 * LMSW (Load Machine-Status Word into CR0).
8159 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8160 */
8161 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8162 {
8163 RTGCPTR GCPtrEffDst;
8164 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8165 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8166 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8167 if (fMemOperand)
8168 {
8169 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8170 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8171 }
8172 else
8173 GCPtrEffDst = NIL_RTGCPTR;
8174 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8175 break;
8176 }
8177
8178 default:
8179 {
8180 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8181 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8182 }
8183 }
8184
8185 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8186 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8187 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8188
8189 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8190 NOREF(pVM);
8191 return rcStrict;
8192}
8193
8194
8195/**
8196 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8197 * VM-exit.
8198 */
8199HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8200{
8201 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8202 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8203
8204 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8205 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8206 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8207 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8208 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8209 | CPUMCTX_EXTRN_EFER);
8210 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8211 AssertRCReturn(rc, rc);
8212
8213 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8214 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8215 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8216 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8217 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8218 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8219 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
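 /* The exit qualification encodes the access size as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes); 2 is not used. */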
8220 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8221
8222 /*
8223 * Update exit history to see if this exit can be optimized.
8224 */
8225 VBOXSTRICTRC rcStrict;
8226 PCEMEXITREC pExitRec = NULL;
8227 if ( !fGstStepping
8228 && !fDbgStepping)
8229 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8230 !fIOString
8231 ? !fIOWrite
8232 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8233 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8234 : !fIOWrite
8235 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8236 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8237 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8238 if (!pExitRec)
8239 {
8240 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8241 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
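 /* E.g. a 16-bit "in ax, dx" has uIOSize=1, so s_aIOSizes yields cbValue=2 and s_aIOOpAnd
    yields 0xffff: only AX is updated below while the upper bits of EAX are preserved. */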
8242
8243 uint32_t const cbValue = s_aIOSizes[uIOSize];
8244 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8245 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8246 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8247 if (fIOString)
8248 {
8249 /*
8250 * INS/OUTS - I/O String instruction.
8251 *
8252 * Use instruction-information if available, otherwise fall back on
8253 * interpreting the instruction.
8254 */
8255 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8256 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8257 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8258 if (fInsOutsInfo)
8259 {
8260 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8261 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8262 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8263 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8264 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8265 if (fIOWrite)
8266 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8267 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8268 else
8269 {
8270 /*
8271 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8272 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8273 * See Intel Instruction spec. for "INS".
8274 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8275 */
8276 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8277 }
8278 }
8279 else
8280 rcStrict = IEMExecOne(pVCpu);
8281
8282 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8283 fUpdateRipAlready = true;
8284 }
8285 else
8286 {
8287 /*
8288 * IN/OUT - I/O instruction.
8289 */
8290 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8291 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8292 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8293 if (fIOWrite)
8294 {
8295 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8296 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8297#ifndef IN_NEM_DARWIN
8298 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8299 && !pCtx->eflags.Bits.u1TF)
8300 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8301#endif
8302 }
8303 else
8304 {
8305 uint32_t u32Result = 0;
8306 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8307 if (IOM_SUCCESS(rcStrict))
8308 {
8309 /* Save result of I/O IN instr. in AL/AX/EAX. */
8310 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8311 }
8312#ifndef IN_NEM_DARWIN
8313 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8314 && !pCtx->eflags.Bits.u1TF)
8315 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8316#endif
8317 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8318 }
8319 }
8320
8321 if (IOM_SUCCESS(rcStrict))
8322 {
8323 if (!fUpdateRipAlready)
8324 {
8325 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8326 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8327 }
8328
8329 /*
8330 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8331 * meditation while booting a Fedora 17 64-bit guest.
8332 *
8333 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8334 */
8335 if (fIOString)
8336 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8337
8338 /*
8339 * If any I/O breakpoints are armed, we need to check if one triggered
8340 * and take appropriate action.
8341 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8342 */
8343 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8344 AssertRCReturn(rc, rc);
8345
8346 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8347 * execution engines about whether hyper BPs and such are pending. */
8348 uint32_t const uDr7 = pCtx->dr[7];
8349 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8350 && X86_DR7_ANY_RW_IO(uDr7)
8351 && (pCtx->cr4 & X86_CR4_DE))
8352 || DBGFBpIsHwIoArmed(pVM)))
8353 {
8354 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8355
8356#ifndef IN_NEM_DARWIN
8357 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8358 VMMRZCallRing3Disable(pVCpu);
8359 HM_DISABLE_PREEMPT(pVCpu);
8360
8361 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8362
8363 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8364 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8365 {
8366 /* Raise #DB. */
8367 if (fIsGuestDbgActive)
8368 ASMSetDR6(pCtx->dr[6]);
8369 if (pCtx->dr[7] != uDr7)
8370 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8371
8372 vmxHCSetPendingXcptDB(pVCpu);
8373 }
8374 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8375 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8376 else if ( rcStrict2 != VINF_SUCCESS
8377 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8378 rcStrict = rcStrict2;
8379 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8380
8381 HM_RESTORE_PREEMPT();
8382 VMMRZCallRing3Enable(pVCpu);
8383#else
8384 /** @todo */
8385#endif
8386 }
8387 }
8388
8389#ifdef VBOX_STRICT
8390 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8391 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8392 Assert(!fIOWrite);
8393 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8394 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8395 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8396 Assert(fIOWrite);
8397 else
8398 {
8399# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8400 * statuses, that the VMM device and some others may return. See
8401 * IOM_SUCCESS() for guidance. */
8402 AssertMsg( RT_FAILURE(rcStrict)
8403 || rcStrict == VINF_SUCCESS
8404 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8405 || rcStrict == VINF_EM_DBG_BREAKPOINT
8406 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8407 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8408# endif
8409 }
8410#endif
8411 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8412 }
8413 else
8414 {
8415 /*
8416 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8417 */
8418 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8419 AssertRCReturn(rc2, rc2);
8420 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8421 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8422 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8423 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8424 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8425 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8426
8427 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8428 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8429
8430 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8431 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8432 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8433 }
8434 return rcStrict;
8435}
8436
8437
8438/**
8439 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8440 * VM-exit.
8441 */
8442HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8443{
8444 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8445
8446 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8447 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8448 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8449 {
8450 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8451 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8452 {
8453 uint32_t uErrCode;
8454 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8455 {
8456 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8457 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8458 }
8459 else
8460 uErrCode = 0;
8461
8462 RTGCUINTPTR GCPtrFaultAddress;
8463 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8464 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8465 else
8466 GCPtrFaultAddress = 0;
8467
8468 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8469
8470 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8471 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8472
8473 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8474 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8475 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8476 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8477 }
8478 }
8479
8480 /* Fall back to the interpreter to emulate the task-switch. */
8481 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8482 return VERR_EM_INTERPRETER;
8483}
8484
8485
8486/**
8487 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8488 */
8489HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8490{
8491 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8492
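 /* Clear the monitor-trap flag (we no longer need this VM-exit) and report that a single step has completed. */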
8493 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8494 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8495 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8496 AssertRC(rc);
8497 return VINF_EM_DBG_STEPPED;
8498}
8499
8500
8501/**
8502 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8503 */
8504HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8505{
8506 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8507 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8508
8509 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8510 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8511 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8512 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8513 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8514
8515 /*
8516 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8517 */
8518 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8519 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8520 {
8521 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8522 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8523 {
8524 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8525 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8526 }
8527 }
8528 else
8529 {
8530 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8531 return rcStrict;
8532 }
8533
8534 /* IOMR0MmioPhysHandler() below may call into IEM; save the necessary state. */
8535 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8536 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8537 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8538 AssertRCReturn(rc, rc);
8539
8540 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8541 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8542 switch (uAccessType)
8543 {
8544#ifndef IN_NEM_DARWIN
8545 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8546 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8547 {
8548 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8549 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8550 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8551
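            /* Reconstruct the physical address of the access: the page comes from the guest's
               APIC-base MSR, the offset within that page from the exit qualification. */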
8552 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8553 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8554 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8555 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8556 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8557
8558 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8559 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8560 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8561 if ( rcStrict == VINF_SUCCESS
8562 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8563 || rcStrict == VERR_PAGE_NOT_PRESENT)
8564 {
8565 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8566 | HM_CHANGED_GUEST_APIC_TPR);
8567 rcStrict = VINF_SUCCESS;
8568 }
8569 break;
8570 }
8571#else
8572 /** @todo */
8573#endif
8574
8575 default:
8576 {
8577 Log4Func(("uAccessType=%#x\n", uAccessType));
8578 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8579 break;
8580 }
8581 }
8582
8583 if (rcStrict != VINF_SUCCESS)
8584 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8585 return rcStrict;
8586}
8587
8588
8589/**
8590 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8591 * VM-exit.
8592 */
8593HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8594{
8595 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8596 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8597
8598 /*
8599 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8600 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8601 * must emulate the MOV DRx access.
8602 */
8603 if (!pVmxTransient->fIsNestedGuest)
8604 {
8605 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8606 if (pVmxTransient->fWasGuestDebugStateActive)
8607 {
8608 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8609 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8610 }
8611
8612 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8613 && !pVmxTransient->fWasHyperDebugStateActive)
8614 {
8615 Assert(!DBGFIsStepping(pVCpu));
8616 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8617
8618 /* Don't intercept MOV DRx any more. */
8619 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8620 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8621 AssertRC(rc);
8622
8623#ifndef IN_NEM_DARWIN
8624 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8625 VMMRZCallRing3Disable(pVCpu);
8626 HM_DISABLE_PREEMPT(pVCpu);
8627
8628 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8629 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8630 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8631
8632 HM_RESTORE_PREEMPT();
8633 VMMRZCallRing3Enable(pVCpu);
8634#else
8635 CPUMR3NemActivateGuestDebugState(pVCpu);
8636 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8637 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8638#endif
8639
8640#ifdef VBOX_WITH_STATISTICS
8641 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8642 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8643 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8644 else
8645 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8646#endif
8647 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8648 return VINF_SUCCESS;
8649 }
8650 }
8651
8652 /*
8653 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8654 * The EFER MSR is always up-to-date.
8655 * Update the segment registers and DR7 from the CPU.
8656 */
8657 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8658 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8659 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8660 AssertRCReturn(rc, rc);
8661 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8662
8663 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8664 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8665 {
8666 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8667 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8668 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8669 if (RT_SUCCESS(rc))
8670 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8671 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8672 }
8673 else
8674 {
8675 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8676 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8677 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8678 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8679 }
8680
8681 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8682 if (RT_SUCCESS(rc))
8683 {
8684 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8685 AssertRCReturn(rc2, rc2);
8686 return VINF_SUCCESS;
8687 }
8688 return rc;
8689}
8690
8691
8692/**
8693 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8694 * Conditional VM-exit.
8695 */
8696HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8697{
8698 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8699
8700#ifndef IN_NEM_DARWIN
8701 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8702
8703 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8704 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8705 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8706 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8707 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8708
8709 /*
8710 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8711 */
8712 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8713 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8714 {
8715 /*
8716 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8717 * instruction emulation to inject the original event. Otherwise, injecting the original event
8718 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8719 */
8720 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8721 { /* likely */ }
8722 else
8723 {
8724 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8725#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8726 /** @todo NSTVMX: Think about how this should be handled. */
8727 if (pVmxTransient->fIsNestedGuest)
8728 return VERR_VMX_IPE_3;
8729#endif
8730 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8731 }
8732 }
8733 else
8734 {
8735 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8736 return rcStrict;
8737 }
8738
8739 /*
8740 * Get sufficient state and update the exit history entry.
8741 */
8742 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8743 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8744 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8745 AssertRCReturn(rc, rc);
8746
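    /* Record this exit in the exit history. If this RIP exits frequently or needs probing,
       an exit record is returned and we defer to EMHistoryExec below instead of the regular
       PGM misconfiguration handler. */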
8747 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8748 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8749 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8750 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8751 if (!pExitRec)
8752 {
8753 /*
8754 * If we succeed, resume guest execution.
8755 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8756 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8757 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8758 * weird case. See @bugref{6043}.
8759 */
8760 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8761 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8762/** @todo bird: We can probably just go straight to IOM here and assume that
8763 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8764 * well. However, we need to address the aliasing workarounds that
8765 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8766 *
8767 * Might also be interesting to see if we can get this done more or
8768 * less locklessly inside IOM. Need to consider the lookup table
8769 * updating and usage a bit more carefully first (or do all updates via
8770 * rendezvous) */
8771 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8772 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8773 if ( rcStrict == VINF_SUCCESS
8774 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8775 || rcStrict == VERR_PAGE_NOT_PRESENT)
8776 {
8777 /* Successfully handled MMIO operation. */
8778 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8779 | HM_CHANGED_GUEST_APIC_TPR);
8780 rcStrict = VINF_SUCCESS;
8781 }
8782 }
8783 else
8784 {
8785 /*
8786 * Frequent exit or something needing probing. Call EMHistoryExec.
8787 */
8788 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8789 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8790
8791 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8792 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8793
8794 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8795 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8796 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8797 }
8798 return rcStrict;
8799#else
8800 AssertFailed();
8801 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8802#endif
8803}
8804
8805
8806/**
8807 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8808 * VM-exit.
8809 */
8810HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8811{
8812 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8813#ifndef IN_NEM_DARWIN
8814 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8815
8816 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8817 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8818 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8819 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8820 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8821 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8822
8823 /*
8824 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8825 */
8826 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8827 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8828 {
8829 /*
8830 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8831 * we shall resolve the nested #PF and re-inject the original event.
8832 */
8833 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8834 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8835 }
8836 else
8837 {
8838 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8839 return rcStrict;
8840 }
8841
8842 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8843 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8844 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8845 AssertRCReturn(rc, rc);
8846
8847 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8848 uint64_t const uExitQual = pVmxTransient->uExitQual;
8849 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8850
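    /* Derive a #PF-style error code from the EPT violation exit qualification so that the
       nested-paging handler below can treat this like an ordinary page fault. */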
8851 RTGCUINT uErrorCode = 0;
8852 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8853 uErrorCode |= X86_TRAP_PF_ID;
8854 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8855 uErrorCode |= X86_TRAP_PF_RW;
8856 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8857 uErrorCode |= X86_TRAP_PF_P;
8858
8859 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8860 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8861
8862 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8863
8864 /*
8865 * Handle the pagefault trap for the nested shadow table.
8866 */
8867 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8868 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8869 TRPMResetTrap(pVCpu);
8870
8871 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8872 if ( rcStrict == VINF_SUCCESS
8873 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8874 || rcStrict == VERR_PAGE_NOT_PRESENT)
8875 {
8876 /* Successfully synced our nested page tables. */
8877 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8879 return VINF_SUCCESS;
8880 }
8881#else
8882 PVM pVM = pVCpu->CTX_SUFF(pVM);
8883 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8884 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8885 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8886 vmxHCImportGuestRip(pVCpu);
8887 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8888
8889 /*
8890 * Ask PGM for information about the given GCPhys. We need to check if we're
8891 * out of sync first.
8892 */
8893 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8894 PGMPHYSNEMPAGEINFO Info;
8895 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8896 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8897 if (RT_SUCCESS(rc))
8898 {
8899 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8900 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8901 {
8902 if (State.fCanResume)
8903 {
8904 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8905 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8906 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8907 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8908 State.fDidSomething ? "" : " no-change"));
8909 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8910 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8911 return VINF_SUCCESS;
8912 }
8913 }
8914
8915 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8916 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8917 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8918 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8919 State.fDidSomething ? "" : " no-change"));
8920 }
8921 else
8922 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8923 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8924 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8925
8926 /*
8927 * Emulate the memory access, either access handler or special memory.
8928 */
8929 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8930 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8931 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8932 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8933 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8934
8935 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8936 AssertRCReturn(rc, rc);
8937
8938 VBOXSTRICTRC rcStrict;
8939 if (!pExitRec)
8940 rcStrict = IEMExecOne(pVCpu);
8941 else
8942 {
8943 /* Frequent access or probing. */
8944 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8945 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8946 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8947 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8948 }
8949
8950 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8951#endif
8952
8953 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8954 return rcStrict;
8955}
8956
8957
8958#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8959/**
8960 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8961 */
8962HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8963{
8964 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8965
8966 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8967 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8968 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8969 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8970 | CPUMCTX_EXTRN_HWVIRT
8971 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8972 AssertRCReturn(rc, rc);
8973
8974 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8975
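    /* Package the VM-exit information (reason, qualification, instruction info and length)
       and have IEM emulate VMCLEAR against the nested-guest's virtual VMX state. */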
8976 VMXVEXITINFO ExitInfo;
8977 RT_ZERO(ExitInfo);
8978 ExitInfo.uReason = pVmxTransient->uExitReason;
8979 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8980 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8981 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8982 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8983
8984 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8985 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8986 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8987 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8988 {
8989 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8990 rcStrict = VINF_SUCCESS;
8991 }
8992 return rcStrict;
8993}
8994
8995
8996/**
8997 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8998 */
8999HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9000{
9001 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9002
9003 /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMLAUNCH;
9004 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9005 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9006 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9007 AssertRCReturn(rc, rc);
9008
9009 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9010
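    /* Let IEM perform the VMLAUNCH; if it succeeds and the guest is now in VMX non-root mode,
       return VINF_VMX_VMLAUNCH_VMRESUME so the caller switches to nested-guest execution. */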
9011 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9012 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9013 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9014 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9015 {
9016 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9017 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9018 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9019 }
9020 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9021 return rcStrict;
9022}
9023
9024
9025/**
9026 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9027 */
9028HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9029{
9030 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9031
9032 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9033 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9034 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9035 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9036 | CPUMCTX_EXTRN_HWVIRT
9037 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9038 AssertRCReturn(rc, rc);
9039
9040 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9041
9042 VMXVEXITINFO ExitInfo;
9043 RT_ZERO(ExitInfo);
9044 ExitInfo.uReason = pVmxTransient->uExitReason;
9045 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9046 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9047 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9048 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9049
9050 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9051 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9052 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9053 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9054 {
9055 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9056 rcStrict = VINF_SUCCESS;
9057 }
9058 return rcStrict;
9059}
9060
9061
9062/**
9063 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9064 */
9065HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9066{
9067 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9068
9069 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9070 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9071 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9072 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9073 | CPUMCTX_EXTRN_HWVIRT
9074 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9075 AssertRCReturn(rc, rc);
9076
9077 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9078
9079 VMXVEXITINFO ExitInfo;
9080 RT_ZERO(ExitInfo);
9081 ExitInfo.uReason = pVmxTransient->uExitReason;
9082 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9083 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9084 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9085 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9086
9087 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9088 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9089 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9090 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9091 {
9092 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9093 rcStrict = VINF_SUCCESS;
9094 }
9095 return rcStrict;
9096}
9097
9098
9099/**
9100 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9101 */
9102HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9103{
9104 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9105
9106 /*
9107 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9108 * thus might not need to import the shadow VMCS state. However, it is safer to do so
9109 * just in case code elsewhere dares look at unsynced VMCS fields.
9110 */
9111 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9112 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9113 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9114 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9115 | CPUMCTX_EXTRN_HWVIRT
9116 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9117 AssertRCReturn(rc, rc);
9118
9119 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9120
9121 VMXVEXITINFO ExitInfo;
9122 RT_ZERO(ExitInfo);
9123 ExitInfo.uReason = pVmxTransient->uExitReason;
9124 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9125 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9126 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9127 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9128 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9129
9130 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9131 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9132 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9133 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9134 {
9135 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9136 rcStrict = VINF_SUCCESS;
9137 }
9138 return rcStrict;
9139}
9140
9141
9142/**
9143 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9144 */
9145HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9146{
9147 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9148
9149 /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9150 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9151 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9152 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9153 AssertRCReturn(rc, rc);
9154
9155 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9156
9157 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9158 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9159 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9160 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9161 {
9162 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9163 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9164 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9165 }
9166 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9167 return rcStrict;
9168}
9169
9170
9171/**
9172 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9173 */
9174HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9175{
9176 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9177
9178 /*
9179 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9180 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9181 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9182 */
9183 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9184 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9185 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9186 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9187 | CPUMCTX_EXTRN_HWVIRT
9188 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9189 AssertRCReturn(rc, rc);
9190
9191 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9192
9193 VMXVEXITINFO ExitInfo;
9194 RT_ZERO(ExitInfo);
9195 ExitInfo.uReason = pVmxTransient->uExitReason;
9196 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9197 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9198 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9199 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9200 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9201
9202 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9203 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9204 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9205 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9206 {
9207 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9208 rcStrict = VINF_SUCCESS;
9209 }
9210 return rcStrict;
9211}
9212
9213
9214/**
9215 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9216 */
9217HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9218{
9219 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9220
9221 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9222 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9223 | CPUMCTX_EXTRN_HWVIRT
9224 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9225 AssertRCReturn(rc, rc);
9226
9227 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9228
9229 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9230 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9231 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9232 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9233 {
9234 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9235 rcStrict = VINF_SUCCESS;
9236 }
9237 return rcStrict;
9238}
9239
9240
9241/**
9242 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9243 */
9244HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9245{
9246 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9247
9248 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9249 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9250 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9251 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9252 | CPUMCTX_EXTRN_HWVIRT
9253 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9254 AssertRCReturn(rc, rc);
9255
9256 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9257
9258 VMXVEXITINFO ExitInfo;
9259 RT_ZERO(ExitInfo);
9260 ExitInfo.uReason = pVmxTransient->uExitReason;
9261 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9262 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9263 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9264 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9265
9266 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9267 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9269 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9270 {
9271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9272 rcStrict = VINF_SUCCESS;
9273 }
9274 return rcStrict;
9275}
9276
9277
9278/**
9279 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9280 */
9281HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9282{
9283 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9284
9285 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9286 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9287 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9288 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9289 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9290 AssertRCReturn(rc, rc);
9291
9292 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9293
9294 VMXVEXITINFO ExitInfo;
9295 RT_ZERO(ExitInfo);
9296 ExitInfo.uReason = pVmxTransient->uExitReason;
9297 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9298 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9299 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9300 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9301
9302 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9303 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9304 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9305 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9306 {
9307 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9308 rcStrict = VINF_SUCCESS;
9309 }
9310 return rcStrict;
9311}
9312
9313
9314# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9315/**
9316 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9317 */
9318HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9319{
9320 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9321
9322 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9323 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9324 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9325 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9326 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9327 AssertRCReturn(rc, rc);
9328
9329 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9330
9331 VMXVEXITINFO ExitInfo;
9332 RT_ZERO(ExitInfo);
9333 ExitInfo.uReason = pVmxTransient->uExitReason;
9334 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9335 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9336 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9337 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9338
9339 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9340 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9341 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9342 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9343 {
9344 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9345 rcStrict = VINF_SUCCESS;
9346 }
9347 return rcStrict;
9348}
9349# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9350#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9351/** @} */
9352
9353
9354#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9355/** @name Nested-guest VM-exit handlers.
9356 * @{
9357 */
9358/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9359/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9360/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9361
9362/**
9363 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9364 * Conditional VM-exit.
9365 */
9366HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9367{
9368 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9369
9370 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9371
9372 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9373 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9374 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9375
9376 switch (uExitIntType)
9377 {
9378#ifndef IN_NEM_DARWIN
9379 /*
9380 * Physical NMIs:
9381 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
9382 */
9383 case VMX_EXIT_INT_INFO_TYPE_NMI:
9384 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9385#endif
9386
9387 /*
9388 * Hardware exceptions,
9389 * Software exceptions,
9390 * Privileged software exceptions:
9391 * Figure out if the exception must be delivered to the guest or the nested-guest.
9392 */
9393 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9394 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9395 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9396 {
9397 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9398 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9399 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9400 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9401
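            /* Check whether the nested hypervisor intercepts this exception vector; the error
               code is needed for the #PF error-code mask/match check. */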
9402 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9403 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9404 pVmxTransient->uExitIntErrorCode);
9405 if (fIntercept)
9406 {
9407 /* Exit qualification is required for debug and page-fault exceptions. */
9408 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9409
9410 /*
9411 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9412 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9413 * length. However, if delivery of a software interrupt, software exception or privileged
9414 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9415 */
9416 VMXVEXITINFO ExitInfo;
9417 RT_ZERO(ExitInfo);
9418 ExitInfo.uReason = pVmxTransient->uExitReason;
9419 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9420 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9421
9422 VMXVEXITEVENTINFO ExitEventInfo;
9423 RT_ZERO(ExitEventInfo);
9424 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9425 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9426 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9427 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9428
9429#ifdef DEBUG_ramshankar
9430 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9431 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9432 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9433 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9434 {
9435 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9436 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9437 }
9438#endif
9439 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9440 }
9441
9442 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9443 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9444 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9445 }
9446
9447 /*
9448 * Software interrupts:
9449 * VM-exits cannot be caused by software interrupts.
9450 *
9451 * External interrupts:
9452 * This should only happen when "acknowledge external interrupts on VM-exit"
9453 * control is set. However, we never set this when executing a guest or
9454 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9455 * the guest.
9456 */
9457 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9458 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9459 default:
9460 {
9461 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9462 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9463 }
9464 }
9465}
9466
9467
9468/**
9469 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9470 * Unconditional VM-exit.
9471 */
9472HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9473{
9474 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9475 return IEMExecVmxVmexitTripleFault(pVCpu);
9476}
9477
9478
9479/**
9480 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9481 */
9482HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9483{
9484 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9485
9486 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9487 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9488 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9489}
9490
9491
9492/**
9493 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9494 */
9495HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9496{
9497 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9498
9499 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9500 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9501 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
9502}
9503
9504
9505/**
9506 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9507 * Unconditional VM-exit.
9508 */
9509HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9510{
9511 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9512
9513 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9514 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9515 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9516 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9517
9518 VMXVEXITINFO ExitInfo;
9519 RT_ZERO(ExitInfo);
9520 ExitInfo.uReason = pVmxTransient->uExitReason;
9521 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9522 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9523
9524 VMXVEXITEVENTINFO ExitEventInfo;
9525 RT_ZERO(ExitEventInfo);
9526 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9527 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9528 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9529}
9530
9531
9532/**
9533 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9534 */
9535HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9536{
9537 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9538
9539 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9540 {
9541 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9542 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9543 }
9544 return vmxHCExitHlt(pVCpu, pVmxTransient);
9545}
9546
9547
9548/**
9549 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9550 */
9551HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9552{
9553 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9554
9555 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9556 {
9557 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9558 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9559
9560 VMXVEXITINFO ExitInfo;
9561 RT_ZERO(ExitInfo);
9562 ExitInfo.uReason = pVmxTransient->uExitReason;
9563 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9564 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9565 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9566 }
9567 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9568}
9569
9570
9571/**
9572 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9573 */
9574HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9575{
9576 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9577
9578 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9579 {
9580 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9581 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9582 }
9583 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9584}
9585
9586
9587/**
9588 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9589 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9590 */
9591HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9592{
9593 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9594
9595 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9596 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9597
9598 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9599
9600 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9601 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9602 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9603
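    /* The VMCS field is taken from the second instruction operand register; outside long mode
       only the lower 32 bits of it are used. */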
9604 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9605 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9606 u64VmcsField &= UINT64_C(0xffffffff);
9607
9608 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9609 {
9610 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9611 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9612
9613 VMXVEXITINFO ExitInfo;
9614 RT_ZERO(ExitInfo);
9615 ExitInfo.uReason = pVmxTransient->uExitReason;
9616 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9617 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9618 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9619 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9620 }
9621
9622 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9623 return vmxHCExitVmread(pVCpu, pVmxTransient);
9624 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9625}
9626
9627
9628/**
9629 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9630 */
9631HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9632{
9633 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9634
9635 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9636 {
9637 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9638 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9639 }
9640
9641 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9642}
9643
9644
9645/**
9646 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9647 * Conditional VM-exit.
9648 */
9649HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9650{
9651 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9652
9653 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9654 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9655
9656 VBOXSTRICTRC rcStrict;
9657 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9658 switch (uAccessType)
9659 {
9660 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9661 {
9662 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9663 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9664 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9665 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9666
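            /* Whether the write is intercepted depends on the control register: CR0/CR4 use
               the guest/host mask and read shadow, CR3 the CR3-load exiting control and
               CR3-target list, and CR8 the CR8-load exiting control. */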
9667 bool fIntercept;
9668 switch (iCrReg)
9669 {
9670 case 0:
9671 case 4:
9672 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9673 break;
9674
9675 case 3:
9676 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9677 break;
9678
9679 case 8:
9680 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9681 break;
9682
9683 default:
9684 fIntercept = false;
9685 break;
9686 }
9687 if (fIntercept)
9688 {
9689 VMXVEXITINFO ExitInfo;
9690 RT_ZERO(ExitInfo);
9691 ExitInfo.uReason = pVmxTransient->uExitReason;
9692 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9693 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9694 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9695 }
9696 else
9697 {
9698 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9699 AssertRCReturn(rc, rc);
9700 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9701 }
9702 break;
9703 }
9704
9705 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9706 {
9707 /*
9708 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9709 * CR2 reads do not cause a VM-exit.
9710 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9711 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9712 */
9713 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9714 if ( iCrReg == 3
9715 || iCrReg == 8)
9716 {
9717 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9718 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9719 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9720 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9721 {
9722 VMXVEXITINFO ExitInfo;
9723 RT_ZERO(ExitInfo);
9724 ExitInfo.uReason = pVmxTransient->uExitReason;
9725 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9726 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9727 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9728 }
9729 else
9730 {
9731 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9732 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9733 }
9734 }
9735 else
9736 {
9737 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9738 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9739 }
9740 break;
9741 }
9742
9743 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9744 {
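            /* CLTS causes a VM-exit only if the nested hypervisor owns CR0.TS (guest/host
               mask) and the CR0 read shadow has TS set. */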
9745 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9746 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9747 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
9748 if ( (uGstHostMask & X86_CR0_TS)
9749 && (uReadShadow & X86_CR0_TS))
9750 {
9751 VMXVEXITINFO ExitInfo;
9752 RT_ZERO(ExitInfo);
9753 ExitInfo.uReason = pVmxTransient->uExitReason;
9754 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9755 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9756 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9757 }
9758 else
9759 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9760 break;
9761 }
9762
9763 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9764 {
9765 RTGCPTR GCPtrEffDst;
9766 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9767 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9768 if (fMemOperand)
9769 {
9770 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9771 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9772 }
9773 else
9774 GCPtrEffDst = NIL_RTGCPTR;
9775
9776 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9777 {
9778 VMXVEXITINFO ExitInfo;
9779 RT_ZERO(ExitInfo);
9780 ExitInfo.uReason = pVmxTransient->uExitReason;
9781 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9782 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9783 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9784 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9785 }
9786 else
9787 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9788 break;
9789 }
9790
9791 default:
9792 {
9793 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9794 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9795 }
9796 }
9797
9798 if (rcStrict == VINF_IEM_RAISED_XCPT)
9799 {
9800 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9801 rcStrict = VINF_SUCCESS;
9802 }
9803 return rcStrict;
9804}
9805
9806
9807/**
9808 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9809 * Conditional VM-exit.
9810 */
9811HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9812{
9813 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9814
9815 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9816 {
9817 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9818 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9819
9820 VMXVEXITINFO ExitInfo;
9821 RT_ZERO(ExitInfo);
9822 ExitInfo.uReason = pVmxTransient->uExitReason;
9823 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9824 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9825 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9826 }
9827 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9828}
9829
9830
9831/**
9832 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9833 * Conditional VM-exit.
9834 */
9835HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9836{
9837 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9838
9839 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9840
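    /* Pull the I/O port and operand size out of the exit qualification before consulting the
       nested hypervisor's I/O bitmaps / unconditional I/O exiting control. */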
9841 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9842 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9843 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9844
9845 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9846 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9847 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9848 {
9849 /*
9850 * IN/OUT instruction:
9851 * - Provides VM-exit instruction length.
9852 *
9853 * INS/OUTS instruction:
9854 * - Provides VM-exit instruction length.
9855 * - Provides Guest-linear address.
9856 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9857 */
9858 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9859 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9860
9861 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9862 pVmxTransient->ExitInstrInfo.u = 0;
9863 pVmxTransient->uGuestLinearAddr = 0;
9864
9865 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9866 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9867 if (fIOString)
9868 {
9869 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9870 if (fVmxInsOutsInfo)
9871 {
9872 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9873 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9874 }
9875 }
9876
9877 VMXVEXITINFO ExitInfo;
9878 RT_ZERO(ExitInfo);
9879 ExitInfo.uReason = pVmxTransient->uExitReason;
9880 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9881 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9882 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9883 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9884 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9885 }
9886 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9887}
9888
9889
9890/**
9891 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9892 */
9893HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9894{
9895 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9896
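    /* If the nested hypervisor uses MSR bitmaps, consult them for the MSR in ECX; otherwise
       every RDMSR unconditionally causes a VM-exit to it. */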
9897 uint32_t fMsrpm;
9898 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9899 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9900 else
9901 fMsrpm = VMXMSRPM_EXIT_RD;
9902
9903 if (fMsrpm & VMXMSRPM_EXIT_RD)
9904 {
9905 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9906 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9907 }
9908 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9909}
9910
9911
9912/**
9913 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9914 */
9915HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9916{
9917 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9918
9919 uint32_t fMsrpm;
9920 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9921 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9922 else
9923 fMsrpm = VMXMSRPM_EXIT_WR;
9924
9925 if (fMsrpm & VMXMSRPM_EXIT_WR)
9926 {
9927 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9928 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9929 }
9930 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9931}
9932
9933
9934/**
9935 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9936 */
9937HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9938{
9939 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9940
9941 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9942 {
9943 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9944 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9945 }
9946 return vmxHCExitMwait(pVCpu, pVmxTransient);
9947}
9948
9949
9950/**
9951 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9952 * VM-exit.
9953 */
9954HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9955{
9956 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9957
9958 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9959 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9960 VMXVEXITINFO ExitInfo;
9961 RT_ZERO(ExitInfo);
9962 ExitInfo.uReason = pVmxTransient->uExitReason;
9963 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9964 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9965}
9966
9967
9968/**
9969 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9970 */
9971HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9972{
9973 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9974
9975 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9976 {
9977 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9978 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9979 }
9980 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9981}
9982
9983
9984/**
9985 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9986 */
9987HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9988{
9989 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9990
9991 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9992 * PAUSE when executing a nested-guest? If it does not, we would not need
9993 * to check for the intercepts here. Just call VM-exit... */
9994
9995 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9996 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9997 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9998 {
9999 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10000 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10001 }
10002 return vmxHCExitPause(pVCpu, pVmxTransient);
10003}
10004
10005
10006/**
10007 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10008 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10009 */
10010HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10011{
10012 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10013
10014 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10015 {
10016 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
10017 VMXVEXITINFO ExitInfo;
10018 RT_ZERO(ExitInfo);
10019 ExitInfo.uReason = pVmxTransient->uExitReason;
10020 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
10021 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10022 }
10023 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10024}
10025
10026
10027/**
10028 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10029 * VM-exit.
10030 */
10031HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10032{
10033 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10034
10035 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10036 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10037 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10038 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10039
10040 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10041
10042 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10043 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10044
10045 VMXVEXITINFO ExitInfo;
10046 RT_ZERO(ExitInfo);
10047 ExitInfo.uReason = pVmxTransient->uExitReason;
10048 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10049 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10050
10051 VMXVEXITEVENTINFO ExitEventInfo;
10052 RT_ZERO(ExitEventInfo);
10053 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10054 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10055 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10056}
10057
10058
10059/**
10060 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10061 * Conditional VM-exit.
10062 */
10063HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10064{
10065 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10066
10067 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10068 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10069 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10070}
10071
10072
10073/**
10074 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10075 * Conditional VM-exit.
10076 */
10077HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10078{
10079 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10080
10081 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10082 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10083 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10084}
10085
10086
10087/**
10088 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10089 */
10090HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10091{
10092 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10093
10094 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10095 {
10096 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10097 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10098 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10099 }
10100 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10101}
10102
10103
10104/**
10105 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10106 */
10107HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10108{
10109 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10110
10111 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10112 {
10113 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10114 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10115 }
10116 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10117}
10118
10119
10120/**
10121 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10122 */
10123HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10124{
10125 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10126
10127 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10128 {
10129 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10130 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10131 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10132 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10133
10134 VMXVEXITINFO ExitInfo;
10135 RT_ZERO(ExitInfo);
10136 ExitInfo.uReason = pVmxTransient->uExitReason;
10137 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10138 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10139 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10140 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10141 }
10142 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10143}
10144
10145
10146/**
10147 * Nested-guest VM-exit handler for invalid-guest state
10148 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10149 */
10150HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10151{
10152 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10153
10154 /*
10155 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
 10156 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
 10157 * Handle it as if the outer guest were in an invalid guest state.
10158 *
10159 * When the fast path is implemented, this should be changed to cause the corresponding
10160 * nested-guest VM-exit.
10161 */
10162 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10163}
10164
10165
10166/**
 10167 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10168 * and only provide the instruction length.
10169 *
10170 * Unconditional VM-exit.
10171 */
10172HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10173{
10174 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10175
10176#ifdef VBOX_STRICT
10177 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10178 switch (pVmxTransient->uExitReason)
10179 {
10180 case VMX_EXIT_ENCLS:
10181 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10182 break;
10183
10184 case VMX_EXIT_VMFUNC:
10185 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10186 break;
10187 }
10188#endif
10189
10190 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10191 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10192}
10193
10194
10195/**
10196 * Nested-guest VM-exit handler for instructions that provide instruction length as
10197 * well as more information.
10198 *
10199 * Unconditional VM-exit.
10200 */
10201HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10202{
10203 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10204
10205#ifdef VBOX_STRICT
10206 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10207 switch (pVmxTransient->uExitReason)
10208 {
10209 case VMX_EXIT_GDTR_IDTR_ACCESS:
10210 case VMX_EXIT_LDTR_TR_ACCESS:
10211 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10212 break;
10213
10214 case VMX_EXIT_RDRAND:
10215 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10216 break;
10217
10218 case VMX_EXIT_RDSEED:
10219 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10220 break;
10221
10222 case VMX_EXIT_XSAVES:
10223 case VMX_EXIT_XRSTORS:
10224 /** @todo NSTVMX: Verify XSS-bitmap. */
10225 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10226 break;
10227
10228 case VMX_EXIT_UMWAIT:
10229 case VMX_EXIT_TPAUSE:
10230 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10231 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10232 break;
10233
10234 case VMX_EXIT_LOADIWKEY:
10235 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10236 break;
10237 }
10238#endif
10239
10240 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10241 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10242 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10243
10244 VMXVEXITINFO ExitInfo;
10245 RT_ZERO(ExitInfo);
10246 ExitInfo.uReason = pVmxTransient->uExitReason;
10247 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10248 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10249 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10250 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10251}
10252
10253
10254# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10255/**
10256 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10257 * Conditional VM-exit.
10258 */
10259HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10260{
10261 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10262 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10263
10264 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10265 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10266 {
10267 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10268 AssertRCReturn(rc, rc);
10269
10270 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10271 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10272 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10273
10274 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10275 uint64_t const uExitQual = pVmxTransient->uExitQual;
10276
10277 RTGCPTR GCPtrNested;
10278 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10279 if (fIsLinearAddrValid)
10280 {
10281 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10282 GCPtrNested = pVmxTransient->uGuestLinearAddr;
10283 }
10284 else
10285 GCPtrNested = 0;
10286
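 /* Translate the EPT-violation exit qualification bits into x86 #PF error-code
 bits for the nested-paging trap handler below: instruction fetch -> ID,
 write access -> RW, any EPT read/write/execute permission bit present -> P. */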
10287 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10288 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10289 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10290 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10291 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10292
10293 PGMPTWALK Walk;
10294 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10295 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
10296 fIsLinearAddrValid, GCPtrNested, &Walk);
10297 if (RT_SUCCESS(rcStrict))
10298 {
10299 if (rcStrict == VINF_SUCCESS)
10300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10301 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10302 {
10303 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10304 rcStrict = VINF_SUCCESS;
10305 }
10306 return rcStrict;
10307 }
10308
10309 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10310 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10311
10312 VMXVEXITEVENTINFO ExitEventInfo;
10313 RT_ZERO(ExitEventInfo);
10314 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10315 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10316
10317 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10318 {
10319 VMXVEXITINFO ExitInfo;
10320 RT_ZERO(ExitInfo);
10321 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10322 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10323 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10324 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10325 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10326 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10327 }
10328
10329 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10330 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10331 }
10332
10333 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10334}
10335
10336
10337/**
10338 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10339 * Conditional VM-exit.
10340 */
10341HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10342{
10343 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10344 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10345
10346 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10347 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10348 {
10349 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10350 AssertRCReturn(rc, rc);
10351
10352 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10353
10354 PGMPTWALK Walk;
10355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10356 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10357 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10358 GCPhysNested, false /* fIsLinearAddrValid */,
10359 0 /* GCPtrNested*/, &Walk);
10360 if (RT_SUCCESS(rcStrict))
10361 return VINF_EM_RAW_EMULATE_INSTR;
10362
10363 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10364 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10365
10366 VMXVEXITEVENTINFO ExitEventInfo;
10367 RT_ZERO(ExitEventInfo);
10368 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10369 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10370
10371 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10372 }
10373
10374 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10375}
10376# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10377
10378/** @} */
10379#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10380
10381
10382/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10383 * probes.
10384 *
 10385 * The following few functions and associated structure contain the bloat
10386 * necessary for providing detailed debug events and dtrace probes as well as
10387 * reliable host side single stepping. This works on the principle of
10388 * "subclassing" the normal execution loop and workers. We replace the loop
10389 * method completely and override selected helpers to add necessary adjustments
10390 * to their core operation.
10391 *
10392 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10393 * any performance for debug and analysis features.
10394 *
10395 * @{
10396 */
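/*
 * Rough usage sketch (for orientation only; the actual debug run-loop body
 * lives elsewhere in this file). The helpers referenced here are all defined
 * below; the loop and exit plumbing shown as "..." is abbreviated:
 *
 * @code
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState); // pick up DBGF/dtrace wishes
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);  // commit them to the VMCS
 *         ... run the guest and handle the VM-exit, dispatching DBGF/dtrace
 *             events via vmxHCHandleExitDtraceEvents for flagged exits ...
 *         ... break on single-stepping, pending forced actions or failure ...
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 * @endcode
 */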
10397
10398/**
 10399 * Transient per-VCPU debug state of the VMCS and related info that we
 10400 * save/restore in the debug run loop.
10401 */
10402typedef struct VMXRUNDBGSTATE
10403{
10404 /** The RIP we started executing at. This is for detecting that we stepped. */
10405 uint64_t uRipStart;
10406 /** The CS we started executing with. */
10407 uint16_t uCsStart;
10408
10409 /** Whether we've actually modified the 1st execution control field. */
10410 bool fModifiedProcCtls : 1;
10411 /** Whether we've actually modified the 2nd execution control field. */
10412 bool fModifiedProcCtls2 : 1;
10413 /** Whether we've actually modified the exception bitmap. */
10414 bool fModifiedXcptBitmap : 1;
10415
 10416 /** We desire the CR0 mask to be cleared. */
10417 bool fClearCr0Mask : 1;
 10418 /** We desire the CR4 mask to be cleared. */
10419 bool fClearCr4Mask : 1;
10420 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10421 uint32_t fCpe1Extra;
10422 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10423 uint32_t fCpe1Unwanted;
10424 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10425 uint32_t fCpe2Extra;
10426 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10427 uint32_t bmXcptExtra;
10428 /** The sequence number of the Dtrace provider settings the state was
10429 * configured against. */
10430 uint32_t uDtraceSettingsSeqNo;
10431 /** VM-exits to check (one bit per VM-exit). */
10432 uint32_t bmExitsToCheck[3];
10433
10434 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10435 uint32_t fProcCtlsInitial;
10436 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10437 uint32_t fProcCtls2Initial;
10438 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10439 uint32_t bmXcptInitial;
10440} VMXRUNDBGSTATE;
10441AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10442typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
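/*
 * Illustration (an assumption for clarity, not lifted from the actual loop):
 * bmExitsToCheck is a plain bitmap indexed by VM-exit reason, so the debug
 * loop can filter cheaply before doing the expensive event dispatch, e.g.:
 *
 * @code
 *     if (ASMBitTest(pDbgState->bmExitsToCheck, pVmxTransient->uExitReason))
 *         rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, pVmxTransient->uExitReason);
 * @endcode
 */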
10443
10444
10445/**
10446 * Initializes the VMXRUNDBGSTATE structure.
10447 *
10448 * @param pVCpu The cross context virtual CPU structure of the
10449 * calling EMT.
10450 * @param pVmxTransient The VMX-transient structure.
10451 * @param pDbgState The debug state to initialize.
10452 */
10453static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10454{
10455 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10456 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10457
10458 pDbgState->fModifiedProcCtls = false;
10459 pDbgState->fModifiedProcCtls2 = false;
10460 pDbgState->fModifiedXcptBitmap = false;
10461 pDbgState->fClearCr0Mask = false;
10462 pDbgState->fClearCr4Mask = false;
10463 pDbgState->fCpe1Extra = 0;
10464 pDbgState->fCpe1Unwanted = 0;
10465 pDbgState->fCpe2Extra = 0;
10466 pDbgState->bmXcptExtra = 0;
10467 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10468 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10469 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10470}
10471
10472
10473/**
 10474 * Updates the VMCS fields with changes requested by @a pDbgState.
10475 *
 10476 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
 10477 * immediately before executing guest code, i.e. when interrupts are disabled.
10478 * We don't check status codes here as we cannot easily assert or return in the
10479 * latter case.
10480 *
10481 * @param pVCpu The cross context virtual CPU structure.
10482 * @param pVmxTransient The VMX-transient structure.
10483 * @param pDbgState The debug state.
10484 */
10485static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10486{
10487 /*
10488 * Ensure desired flags in VMCS control fields are set.
10489 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10490 *
10491 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10492 * there should be no stale data in pCtx at this point.
10493 */
10494 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10495 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10496 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10497 {
10498 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10499 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10500 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10501 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10502 pDbgState->fModifiedProcCtls = true;
10503 }
10504
10505 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10506 {
10507 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10508 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10509 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10510 pDbgState->fModifiedProcCtls2 = true;
10511 }
10512
10513 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10514 {
10515 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10516 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10517 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10518 pDbgState->fModifiedXcptBitmap = true;
10519 }
10520
10521 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10522 {
10523 pVmcsInfo->u64Cr0Mask = 0;
10524 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10525 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10526 }
10527
10528 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10529 {
10530 pVmcsInfo->u64Cr4Mask = 0;
10531 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10532 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10533 }
10534
10535 NOREF(pVCpu);
10536}
10537
10538
10539/**
 10540 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
 10541 * re-entry next time around.
10542 *
10543 * @returns Strict VBox status code (i.e. informational status codes too).
10544 * @param pVCpu The cross context virtual CPU structure.
10545 * @param pVmxTransient The VMX-transient structure.
10546 * @param pDbgState The debug state.
10547 * @param rcStrict The return code from executing the guest using single
10548 * stepping.
10549 */
10550static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10551 VBOXSTRICTRC rcStrict)
10552{
10553 /*
10554 * Restore VM-exit control settings as we may not reenter this function the
10555 * next time around.
10556 */
10557 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10558
 10559 /* We reload the initial value and trigger what recalculations we can the
 10560 next time around. From the looks of things, that's all that's required atm. */
10561 if (pDbgState->fModifiedProcCtls)
10562 {
10563 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10564 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10565 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10566 AssertRC(rc2);
10567 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10568 }
10569
10570 /* We're currently the only ones messing with this one, so just restore the
10571 cached value and reload the field. */
10572 if ( pDbgState->fModifiedProcCtls2
10573 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10574 {
10575 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10576 AssertRC(rc2);
10577 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10578 }
10579
10580 /* If we've modified the exception bitmap, we restore it and trigger
10581 reloading and partial recalculation the next time around. */
10582 if (pDbgState->fModifiedXcptBitmap)
10583 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10584
10585 return rcStrict;
10586}
10587
10588
10589/**
10590 * Configures VM-exit controls for current DBGF and DTrace settings.
10591 *
10592 * This updates @a pDbgState and the VMCS execution control fields to reflect
10593 * the necessary VM-exits demanded by DBGF and DTrace.
10594 *
10595 * @param pVCpu The cross context virtual CPU structure.
10596 * @param pVmxTransient The VMX-transient structure. May update
10597 * fUpdatedTscOffsettingAndPreemptTimer.
10598 * @param pDbgState The debug state.
10599 */
10600static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10601{
10602#ifndef IN_NEM_DARWIN
10603 /*
10604 * Take down the dtrace serial number so we can spot changes.
10605 */
10606 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10607 ASMCompilerBarrier();
10608#endif
10609
10610 /*
10611 * We'll rebuild most of the middle block of data members (holding the
10612 * current settings) as we go along here, so start by clearing it all.
10613 */
10614 pDbgState->bmXcptExtra = 0;
10615 pDbgState->fCpe1Extra = 0;
10616 pDbgState->fCpe1Unwanted = 0;
10617 pDbgState->fCpe2Extra = 0;
10618 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10619 pDbgState->bmExitsToCheck[i] = 0;
10620
10621 /*
10622 * Software interrupts (INT XXh) - no idea how to trigger these...
10623 */
10624 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10625 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10626 || VBOXVMM_INT_SOFTWARE_ENABLED())
10627 {
10628 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10629 }
10630
10631 /*
10632 * INT3 breakpoints - triggered by #BP exceptions.
10633 */
10634 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10635 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10636
10637 /*
10638 * Exception bitmap and XCPT events+probes.
10639 */
10640 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10641 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10642 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10643
10644 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10645 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10646 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10647 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10648 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10649 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10650 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10651 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10652 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10653 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10654 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10655 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10656 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10657 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10658 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10659 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10660 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10661 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10662
10663 if (pDbgState->bmXcptExtra)
10664 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10665
10666 /*
10667 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10668 *
 10669 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10670 * So, when adding/changing/removing please don't forget to update it.
10671 *
 10672 * Some of the macros are picking up local variables to save horizontal space
 10673 * (being able to see it in a table is the lesser evil here).
10674 */
10675#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10676 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10677 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10678#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10679 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10680 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10681 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10682 } else do { } while (0)
10683#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10684 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10685 { \
10686 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10687 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10688 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10689 } else do { } while (0)
10690#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10691 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10692 { \
10693 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10694 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10695 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10696 } else do { } while (0)
10697#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10698 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10699 { \
10700 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10701 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10702 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10703 } else do { } while (0)
10704
10705 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10706 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10707 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10708 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10709 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10710
10711 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10712 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10713 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10714 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10715 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10716 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10717 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10718 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10719 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10720 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10721 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10722 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10723 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10724 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10725 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10726 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10727 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10728 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10729 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10731 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10732 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10733 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10734 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10735 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10736 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10737 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10738 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10739 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10740 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10741 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10742 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10743 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10744 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10745 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10746 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10747
10748 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10749 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10750 {
10751 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10752 | CPUMCTX_EXTRN_APIC_TPR);
10753 AssertRC(rc);
10754
10755#if 0 /** @todo fix me */
10756 pDbgState->fClearCr0Mask = true;
10757 pDbgState->fClearCr4Mask = true;
10758#endif
10759 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10760 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10761 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10762 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10763 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10764 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10765 require clearing here and in the loop if we start using it. */
10766 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10767 }
10768 else
10769 {
10770 if (pDbgState->fClearCr0Mask)
10771 {
10772 pDbgState->fClearCr0Mask = false;
10773 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10774 }
10775 if (pDbgState->fClearCr4Mask)
10776 {
10777 pDbgState->fClearCr4Mask = false;
10778 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10779 }
10780 }
10781 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10782 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10783
10784 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10785 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10786 {
10787 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10788 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10789 }
10790 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10791 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10792
10793 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10794 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10795 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10796 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10797 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10798 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10799 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10801#if 0 /** @todo too slow, fix handler. */
10802 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10803#endif
10804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10805
10806 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10807 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10808 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10809 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10810 {
10811 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10812 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10813 }
10814 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10815 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10816 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10817 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10818
10819 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10820 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10821 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10822 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10823 {
10824 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10825 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10826 }
10827 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10828 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10829 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10830 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10831
10832 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10833 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10834 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10835 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10836 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10837 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10838 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10839 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10840 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10841 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10842 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10843 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10844 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10845 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10846 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10847 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10848 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10849 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10850 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
 10851 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10852 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10853 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10854
10855#undef IS_EITHER_ENABLED
10856#undef SET_ONLY_XBM_IF_EITHER_EN
10857#undef SET_CPE1_XBM_IF_EITHER_EN
10858#undef SET_CPEU_XBM_IF_EITHER_EN
10859#undef SET_CPE2_XBM_IF_EITHER_EN
10860
 10861 /*
 10862 * Sanitize the controls: keep only bits the CPU allows to be set (allowed1) and don't try to clear bits it requires to be set (allowed0).
 10863 */
10864 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10865 if (pDbgState->fCpe2Extra)
10866 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10867 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10868 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
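 /* Keep the per-VCPU RDTSC-exit preference in sync with what we just computed
 and force the TSC offsetting / preemption-timer setup to be re-evaluated
 whenever it changes. */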
10869#ifndef IN_NEM_DARWIN
10870 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10871 {
10872 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10873 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10874 }
10875#else
10876 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10877 {
10878 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10879 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10880 }
10881#endif
10882
10883 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10884 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10885 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10886 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10887}
10888
10889
10890/**
10891 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10892 * appropriate.
10893 *
10894 * The caller has checked the VM-exit against the
10895 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
10896 * already, so we don't have to do that either.
10897 *
10898 * @returns Strict VBox status code (i.e. informational status codes too).
10899 * @param pVCpu The cross context virtual CPU structure.
10900 * @param pVmxTransient The VMX-transient structure.
10901 * @param uExitReason The VM-exit reason.
10902 *
10903 * @remarks The name of this function is displayed by dtrace, so keep it short
 10904 * and to the point. No longer than 33 chars, please.
10905 */
10906static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10907{
10908 /*
10909 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10910 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10911 *
 10912 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10913 * does. Must add/change/remove both places. Same ordering, please.
10914 *
10915 * Added/removed events must also be reflected in the next section
10916 * where we dispatch dtrace events.
10917 */
10918 bool fDtrace1 = false;
10919 bool fDtrace2 = false;
10920 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10921 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10922 uint32_t uEventArg = 0;
10923#define SET_EXIT(a_EventSubName) \
10924 do { \
10925 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10926 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10927 } while (0)
10928#define SET_BOTH(a_EventSubName) \
10929 do { \
10930 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10931 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10932 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10933 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10934 } while (0)
10935 switch (uExitReason)
10936 {
10937 case VMX_EXIT_MTF:
10938 return vmxHCExitMtf(pVCpu, pVmxTransient);
10939
10940 case VMX_EXIT_XCPT_OR_NMI:
10941 {
10942 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10943 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10944 {
10945 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10946 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10947 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10948 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10949 {
10950 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10951 {
10952 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10953 uEventArg = pVmxTransient->uExitIntErrorCode;
10954 }
10955 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10956 switch (enmEvent1)
10957 {
10958 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10959 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10960 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10961 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10962 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10963 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10964 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10965 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10966 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10967 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10968 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10969 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10970 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10971 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10972 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10973 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10974 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10975 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10976 default: break;
10977 }
10978 }
10979 else
10980 AssertFailed();
10981 break;
10982
10983 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10984 uEventArg = idxVector;
10985 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10986 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10987 break;
10988 }
10989 break;
10990 }
10991
10992 case VMX_EXIT_TRIPLE_FAULT:
10993 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10994 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10995 break;
10996 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10997 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10998 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10999 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11000 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11001
11002 /* Instruction specific VM-exits: */
11003 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11004 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11005 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11006 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11007 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11008 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11009 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11010 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11011 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11012 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11013 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11014 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11015 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11016 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11017 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11018 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11019 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11020 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11021 case VMX_EXIT_MOV_CRX:
11022 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11023 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11024 SET_BOTH(CRX_READ);
11025 else
11026 SET_BOTH(CRX_WRITE);
11027 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11028 break;
11029 case VMX_EXIT_MOV_DRX:
11030 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11031 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11032 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11033 SET_BOTH(DRX_READ);
11034 else
11035 SET_BOTH(DRX_WRITE);
11036 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11037 break;
11038 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11039 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11040 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11041 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11042 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11043 case VMX_EXIT_GDTR_IDTR_ACCESS:
11044 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11045 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11046 {
11047 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11048 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11049 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11050 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11051 }
11052 break;
11053
11054 case VMX_EXIT_LDTR_TR_ACCESS:
11055 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11056 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11057 {
11058 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11059 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11060 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11061 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11062 }
11063 break;
11064
11065 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11066 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11067 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11068 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11069 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11070 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11071 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11072 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11073 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11074 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11075 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11076
11077 /* Events that aren't relevant at this point. */
11078 case VMX_EXIT_EXT_INT:
11079 case VMX_EXIT_INT_WINDOW:
11080 case VMX_EXIT_NMI_WINDOW:
11081 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11082 case VMX_EXIT_PREEMPT_TIMER:
11083 case VMX_EXIT_IO_INSTR:
11084 break;
11085
11086 /* Errors and unexpected events. */
11087 case VMX_EXIT_INIT_SIGNAL:
11088 case VMX_EXIT_SIPI:
11089 case VMX_EXIT_IO_SMI:
11090 case VMX_EXIT_SMI:
11091 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11092 case VMX_EXIT_ERR_MSR_LOAD:
11093 case VMX_EXIT_ERR_MACHINE_CHECK:
11094 case VMX_EXIT_PML_FULL:
11095 case VMX_EXIT_VIRTUALIZED_EOI:
11096 break;
11097
11098 default:
11099 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11100 break;
11101 }
11102#undef SET_BOTH
11103#undef SET_EXIT
11104
11105 /*
 11106 * Dtrace tracepoints go first. We do them all here at once so we don't
 11107 * have to duplicate the guest-state saving and related setup a few dozen
 11108 * times. The downside is that we've got to repeat the switch, though this
 11109 * time we use enmEvent since the probes are a subset of what DBGF does.
11110 */
11111 if (fDtrace1 || fDtrace2)
11112 {
11113 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11114 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11115 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11116 switch (enmEvent1)
11117 {
11118 /** @todo consider which extra parameters would be helpful for each probe. */
11119 case DBGFEVENT_END: break;
11120 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11121 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11122 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11123 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11124 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11125 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11126 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11127 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11128 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11129 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11130 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11131 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11132 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11133 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11134 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11135 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11136 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11137 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11138 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11139 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11140 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11141 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11142 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11143 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11144 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11145 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11146 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11147 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11148 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11149 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11150 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11151 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11152 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11153 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11154 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11155 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11156 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11157 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11158 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11159 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11160 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11161 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11162 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11163 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11164 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11165 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11166 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11167 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11168 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11169 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11170 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11171 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11172 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11173 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11174 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11175 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11176 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11177 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11178 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11179 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11180 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11181 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11182 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11183 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11184 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11185 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11186 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11187 }
11188 switch (enmEvent2)
11189 {
11190 /** @todo consider which extra parameters would be helpful for each probe. */
11191 case DBGFEVENT_END: break;
11192 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11193 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11194 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11195 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11196 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11197 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11198 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11199 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11200 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11201 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11202 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11203 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11204 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11205 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11206 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11207 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11208 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11209 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11210 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11211 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11212 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11213 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11214 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11215 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11216 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11217 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11218 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11219 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11220 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11221 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11222 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11223 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11224 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11225 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11226 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11227 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11228 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11229 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11230 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11231 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11232 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11233 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11234 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11235 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11236 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11237 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11238 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11239 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11240 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11241 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11242 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11243 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11244 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11245 }
11246 }
11247
11248 /*
11249 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11250 * the DBGF call will do a full check).
11251 *
11252 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11253 * Note! If we have two events, we prioritize the first, i.e. the instruction
11254 * one, in order to avoid event nesting.
11255 */
11256 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11257 if ( enmEvent1 != DBGFEVENT_END
11258 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11259 {
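                  /* Import CS and RIP so the DBGF event is raised with an up-to-date guest location. */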
11260 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11261 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11262 if (rcStrict != VINF_SUCCESS)
11263 return rcStrict;
11264 }
11265 else if ( enmEvent2 != DBGFEVENT_END
11266 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11267 {
11268 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11269 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11270 if (rcStrict != VINF_SUCCESS)
11271 return rcStrict;
11272 }
11273
11274 return VINF_SUCCESS;
11275}
11276
11277
11278/**
11279 * Single-stepping VM-exit filtering.
11280 *
11281 * This preprocesses the VM-exit and decides whether we have gotten far
11282 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11283 * handling is performed.
11284 *
11285 * @returns Strict VBox status code (i.e. informational status codes too).
11286 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11287 * @param pVmxTransient The VMX-transient structure.
11288 * @param pDbgState The debug state.
11289 */
11290DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11291{
11292 /*
11293 * Expensive (saves context) generic dtrace VM-exit probe.
11294 */
11295 uint32_t const uExitReason = pVmxTransient->uExitReason;
11296 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11297 { /* more likely */ }
11298 else
11299 {
11300 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11301 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11302 AssertRC(rc);
11303 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11304 }
11305
11306#ifndef IN_NEM_DARWIN
11307 /*
11308 * Check for host NMI, just to get that out of the way.
11309 */
11310 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11311 { /* normally likely */ }
11312 else
11313 {
11314 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11315 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11316 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11317 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11318 }
11319#endif
11320
11321 /*
11322 * Check for single stepping event if we're stepping.
11323 */
11324 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11325 {
11326 switch (uExitReason)
11327 {
11328 case VMX_EXIT_MTF:
11329 return vmxHCExitMtf(pVCpu, pVmxTransient);
11330
11331 /* Various events: */
11332 case VMX_EXIT_XCPT_OR_NMI:
11333 case VMX_EXIT_EXT_INT:
11334 case VMX_EXIT_TRIPLE_FAULT:
11335 case VMX_EXIT_INT_WINDOW:
11336 case VMX_EXIT_NMI_WINDOW:
11337 case VMX_EXIT_TASK_SWITCH:
11338 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11339 case VMX_EXIT_APIC_ACCESS:
11340 case VMX_EXIT_EPT_VIOLATION:
11341 case VMX_EXIT_EPT_MISCONFIG:
11342 case VMX_EXIT_PREEMPT_TIMER:
11343
11344 /* Instruction specific VM-exits: */
11345 case VMX_EXIT_CPUID:
11346 case VMX_EXIT_GETSEC:
11347 case VMX_EXIT_HLT:
11348 case VMX_EXIT_INVD:
11349 case VMX_EXIT_INVLPG:
11350 case VMX_EXIT_RDPMC:
11351 case VMX_EXIT_RDTSC:
11352 case VMX_EXIT_RSM:
11353 case VMX_EXIT_VMCALL:
11354 case VMX_EXIT_VMCLEAR:
11355 case VMX_EXIT_VMLAUNCH:
11356 case VMX_EXIT_VMPTRLD:
11357 case VMX_EXIT_VMPTRST:
11358 case VMX_EXIT_VMREAD:
11359 case VMX_EXIT_VMRESUME:
11360 case VMX_EXIT_VMWRITE:
11361 case VMX_EXIT_VMXOFF:
11362 case VMX_EXIT_VMXON:
11363 case VMX_EXIT_MOV_CRX:
11364 case VMX_EXIT_MOV_DRX:
11365 case VMX_EXIT_IO_INSTR:
11366 case VMX_EXIT_RDMSR:
11367 case VMX_EXIT_WRMSR:
11368 case VMX_EXIT_MWAIT:
11369 case VMX_EXIT_MONITOR:
11370 case VMX_EXIT_PAUSE:
11371 case VMX_EXIT_GDTR_IDTR_ACCESS:
11372 case VMX_EXIT_LDTR_TR_ACCESS:
11373 case VMX_EXIT_INVEPT:
11374 case VMX_EXIT_RDTSCP:
11375 case VMX_EXIT_INVVPID:
11376 case VMX_EXIT_WBINVD:
11377 case VMX_EXIT_XSETBV:
11378 case VMX_EXIT_RDRAND:
11379 case VMX_EXIT_INVPCID:
11380 case VMX_EXIT_VMFUNC:
11381 case VMX_EXIT_RDSEED:
11382 case VMX_EXIT_XSAVES:
11383 case VMX_EXIT_XRSTORS:
11384 {
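                      /* Only report a completed step once CS:RIP has moved away from where the step started. */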
11385 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11386 AssertRCReturn(rc, rc);
11387 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11388 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11389 return VINF_EM_DBG_STEPPED;
11390 break;
11391 }
11392
11393 /* Errors and unexpected events: */
11394 case VMX_EXIT_INIT_SIGNAL:
11395 case VMX_EXIT_SIPI:
11396 case VMX_EXIT_IO_SMI:
11397 case VMX_EXIT_SMI:
11398 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11399 case VMX_EXIT_ERR_MSR_LOAD:
11400 case VMX_EXIT_ERR_MACHINE_CHECK:
11401 case VMX_EXIT_PML_FULL:
11402 case VMX_EXIT_VIRTUALIZED_EOI:
11403 case VMX_EXIT_APIC_WRITE: /* This is said to be fault-like, so presumably it must be processed. */
11404 break;
11405
11406 default:
11407 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11408 break;
11409 }
11410 }
11411
11412 /*
11413 * Check for debugger event breakpoints and dtrace probes.
11414 */
11415 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11416 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11417 {
11418 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11419 if (rcStrict != VINF_SUCCESS)
11420 return rcStrict;
11421 }
11422
11423 /*
11424 * Normal processing.
11425 */
11426#ifdef HMVMX_USE_FUNCTION_TABLE
11427 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11428#else
11429 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11430#endif
11431}
11432
11433/** @} */