VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97005

Last change on this file was r97005, checked in by vboxsync, 3 years ago

VMM/HMVMXR0: Converted all VMVEXITINFO initialization to use initializers (via macros) and not call memset/RT_ZERO. This may cause the code to be slightly larger but faster, as it allows the compiler to do a better job.

1/* $Id: VMXAllTemplate.cpp.h 97005 2022-10-05 14:37:41Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41
42/** Use the function table. */
43#define HMVMX_USE_FUNCTION_TABLE
44
45/** Determine which tagged-TLB flush handler to use. */
46#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
47#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
48#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
49#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
50
51/** Assert that all the given fields have been read from the VMCS. */
52#ifdef VBOX_STRICT
53# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
54 do { \
55 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
56 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
57 } while (0)
58#else
59# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
60#endif
61
62/**
63 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
64 * guest using hardware-assisted VMX.
65 *
66 * This excludes state like GPRs (other than RSP) which are always
67 * swapped and restored across the world-switch, and also registers, like the
68 * EFER MSR, which cannot be modified by the guest without causing a VM-exit.
69 */
70#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
71 | CPUMCTX_EXTRN_RFLAGS \
72 | CPUMCTX_EXTRN_RSP \
73 | CPUMCTX_EXTRN_SREG_MASK \
74 | CPUMCTX_EXTRN_TABLE_MASK \
75 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
76 | CPUMCTX_EXTRN_SYSCALL_MSRS \
77 | CPUMCTX_EXTRN_SYSENTER_MSRS \
78 | CPUMCTX_EXTRN_TSC_AUX \
79 | CPUMCTX_EXTRN_OTHER_MSRS \
80 | CPUMCTX_EXTRN_CR0 \
81 | CPUMCTX_EXTRN_CR3 \
82 | CPUMCTX_EXTRN_CR4 \
83 | CPUMCTX_EXTRN_DR7 \
84 | CPUMCTX_EXTRN_HWVIRT \
85 | CPUMCTX_EXTRN_INHIBIT_INT \
86 | CPUMCTX_EXTRN_INHIBIT_NMI)
87
88/**
89 * Exception bitmap mask for real-mode guests (real-on-v86).
90 *
91 * We need to intercept all exceptions manually except:
92 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
93 * due to bugs in Intel CPUs.
94 * - \#PF need not be intercepted even in real-mode if we have nested paging
95 * support.
96 */
97#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
98 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
99 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
100 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
101 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
102 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
103 | RT_BIT(X86_XCPT_XF))
104
105/** Maximum VM-instruction error number. */
106#define HMVMX_INSTR_ERROR_MAX 28
107
108/** Profiling macro. */
109#ifdef HM_PROFILE_EXIT_DISPATCH
110# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
111# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
112#else
113# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
114# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
115#endif
116
117#ifndef IN_NEM_DARWIN
118/** Assert that preemption is disabled or covered by thread-context hooks. */
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
120 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
121
122/** Assert that we haven't migrated CPUs when thread-context hooks are not
123 * used. */
124# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
125 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
126 ("Illegal migration! Entered on CPU %u Current %u\n", \
127 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
128#else
129# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
130# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
131#endif
132
133/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
134 * context. */
135#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
136 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
137 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
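/* Illustrative usage (not part of the original source; a minimal sketch): code that is
 * about to read CR0 and CR4 from the guest context would first assert that those bits
 * are no longer marked external, i.e. that the state has been imported from the VMCS:
 * @code
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
 * @endcode
 */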
138
139/** Log the VM-exit reason with an easily visible marker to identify it in a
140 * potential sea of logging data. */
141#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
142 do { \
143 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
144 HMGetVmxExitName(a_uExitReason))); \
145 } while (0) \
146
147
148/*********************************************************************************************************************************
149* Structures and Typedefs *
150*********************************************************************************************************************************/
151/**
152 * Memory operand read or write access.
153 */
154typedef enum VMXMEMACCESS
155{
156 VMXMEMACCESS_READ = 0,
157 VMXMEMACCESS_WRITE = 1
158} VMXMEMACCESS;
159
160
161/**
162 * VMX VM-exit handler.
163 *
164 * @returns Strict VBox status code (i.e. informational status codes too).
165 * @param pVCpu The cross context virtual CPU structure.
166 * @param pVmxTransient The VMX-transient structure.
167 */
168#ifndef HMVMX_USE_FUNCTION_TABLE
169typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
170#else
171typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
172/** Pointer to VM-exit handler. */
173typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
174#endif
175
176/**
177 * VMX VM-exit handler, non-strict status code.
178 *
179 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
180 *
181 * @returns VBox status code, no informational status code returned.
182 * @param pVCpu The cross context virtual CPU structure.
183 * @param pVmxTransient The VMX-transient structure.
184 *
185 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
186 * use of that status code will be replaced with VINF_EM_SOMETHING
187 * later when switching over to IEM.
188 */
189#ifndef HMVMX_USE_FUNCTION_TABLE
190typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191#else
192typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
193#endif
194
195
196/*********************************************************************************************************************************
197* Internal Functions *
198*********************************************************************************************************************************/
199#ifndef HMVMX_USE_FUNCTION_TABLE
200DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
201# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
202# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
203#else
204# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
205# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
206#endif
207#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
208DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
209#endif
210
211static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
212
213/** @name VM-exit handler prototypes.
214 * @{
215 */
216static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
217static FNVMXEXITHANDLER vmxHCExitExtInt;
218static FNVMXEXITHANDLER vmxHCExitTripleFault;
219static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
220static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
221static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
222static FNVMXEXITHANDLER vmxHCExitCpuid;
223static FNVMXEXITHANDLER vmxHCExitGetsec;
224static FNVMXEXITHANDLER vmxHCExitHlt;
225static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
226static FNVMXEXITHANDLER vmxHCExitInvlpg;
227static FNVMXEXITHANDLER vmxHCExitRdpmc;
228static FNVMXEXITHANDLER vmxHCExitVmcall;
229#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
230static FNVMXEXITHANDLER vmxHCExitVmclear;
231static FNVMXEXITHANDLER vmxHCExitVmlaunch;
232static FNVMXEXITHANDLER vmxHCExitVmptrld;
233static FNVMXEXITHANDLER vmxHCExitVmptrst;
234static FNVMXEXITHANDLER vmxHCExitVmread;
235static FNVMXEXITHANDLER vmxHCExitVmresume;
236static FNVMXEXITHANDLER vmxHCExitVmwrite;
237static FNVMXEXITHANDLER vmxHCExitVmxoff;
238static FNVMXEXITHANDLER vmxHCExitVmxon;
239static FNVMXEXITHANDLER vmxHCExitInvvpid;
240# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
241static FNVMXEXITHANDLER vmxHCExitInvept;
242# endif
243#endif
244static FNVMXEXITHANDLER vmxHCExitRdtsc;
245static FNVMXEXITHANDLER vmxHCExitMovCRx;
246static FNVMXEXITHANDLER vmxHCExitMovDRx;
247static FNVMXEXITHANDLER vmxHCExitIoInstr;
248static FNVMXEXITHANDLER vmxHCExitRdmsr;
249static FNVMXEXITHANDLER vmxHCExitWrmsr;
250static FNVMXEXITHANDLER vmxHCExitMwait;
251static FNVMXEXITHANDLER vmxHCExitMtf;
252static FNVMXEXITHANDLER vmxHCExitMonitor;
253static FNVMXEXITHANDLER vmxHCExitPause;
254static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
255static FNVMXEXITHANDLER vmxHCExitApicAccess;
256static FNVMXEXITHANDLER vmxHCExitEptViolation;
257static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
258static FNVMXEXITHANDLER vmxHCExitRdtscp;
259static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
260static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
261static FNVMXEXITHANDLER vmxHCExitXsetbv;
262static FNVMXEXITHANDLER vmxHCExitInvpcid;
263#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
264static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
265#endif
266static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
267static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
268/** @} */
269
270#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
271/** @name Nested-guest VM-exit handler prototypes.
272 * @{
273 */
274static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
275static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
276static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
277static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
278static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
279static FNVMXEXITHANDLER vmxHCExitHltNested;
280static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
281static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
282static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
283static FNVMXEXITHANDLER vmxHCExitRdtscNested;
284static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
285static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
286static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
287static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
288static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
289static FNVMXEXITHANDLER vmxHCExitMwaitNested;
290static FNVMXEXITHANDLER vmxHCExitMtfNested;
291static FNVMXEXITHANDLER vmxHCExitMonitorNested;
292static FNVMXEXITHANDLER vmxHCExitPauseNested;
293static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
294static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
295static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
296static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
297static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
298static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
299static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
301static FNVMXEXITHANDLER vmxHCExitInstrNested;
302static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
303# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
304static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
305static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
306# endif
307/** @} */
308#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
309
310
311/*********************************************************************************************************************************
312* Global Variables *
313*********************************************************************************************************************************/
314#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
315/**
316 * Array of all VMCS fields.
317 * Any fields added to the VT-x spec. should be added here.
318 *
319 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
320 * of nested-guests.
321 */
322static const uint32_t g_aVmcsFields[] =
323{
324 /* 16-bit control fields. */
325 VMX_VMCS16_VPID,
326 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
327 VMX_VMCS16_EPTP_INDEX,
328
329 /* 16-bit guest-state fields. */
330 VMX_VMCS16_GUEST_ES_SEL,
331 VMX_VMCS16_GUEST_CS_SEL,
332 VMX_VMCS16_GUEST_SS_SEL,
333 VMX_VMCS16_GUEST_DS_SEL,
334 VMX_VMCS16_GUEST_FS_SEL,
335 VMX_VMCS16_GUEST_GS_SEL,
336 VMX_VMCS16_GUEST_LDTR_SEL,
337 VMX_VMCS16_GUEST_TR_SEL,
338 VMX_VMCS16_GUEST_INTR_STATUS,
339 VMX_VMCS16_GUEST_PML_INDEX,
340
341 /* 16-bit host-state fields. */
342 VMX_VMCS16_HOST_ES_SEL,
343 VMX_VMCS16_HOST_CS_SEL,
344 VMX_VMCS16_HOST_SS_SEL,
345 VMX_VMCS16_HOST_DS_SEL,
346 VMX_VMCS16_HOST_FS_SEL,
347 VMX_VMCS16_HOST_GS_SEL,
348 VMX_VMCS16_HOST_TR_SEL,
349
350 /* 64-bit control fields. */
351 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
352 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
353 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
355 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
356 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
357 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
358 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
361 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
364 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
365 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
366 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
367 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
368 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
369 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
370 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
371 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
372 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
373 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
374 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
375 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
376 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
377 VMX_VMCS64_CTRL_EPTP_FULL,
378 VMX_VMCS64_CTRL_EPTP_HIGH,
379 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
380 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
387 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
388 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
389 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
390 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
391 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
394 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
395 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
396 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
397 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_SPPTP_FULL,
400 VMX_VMCS64_CTRL_SPPTP_HIGH,
401 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
402 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
403 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
404 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
405 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
406 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
407
408 /* 64-bit read-only data fields. */
409 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
410 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
411
412 /* 64-bit guest-state fields. */
413 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
414 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
415 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
416 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
417 VMX_VMCS64_GUEST_PAT_FULL,
418 VMX_VMCS64_GUEST_PAT_HIGH,
419 VMX_VMCS64_GUEST_EFER_FULL,
420 VMX_VMCS64_GUEST_EFER_HIGH,
421 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
422 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
423 VMX_VMCS64_GUEST_PDPTE0_FULL,
424 VMX_VMCS64_GUEST_PDPTE0_HIGH,
425 VMX_VMCS64_GUEST_PDPTE1_FULL,
426 VMX_VMCS64_GUEST_PDPTE1_HIGH,
427 VMX_VMCS64_GUEST_PDPTE2_FULL,
428 VMX_VMCS64_GUEST_PDPTE2_HIGH,
429 VMX_VMCS64_GUEST_PDPTE3_FULL,
430 VMX_VMCS64_GUEST_PDPTE3_HIGH,
431 VMX_VMCS64_GUEST_BNDCFGS_FULL,
432 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
433 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
434 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
435 VMX_VMCS64_GUEST_PKRS_FULL,
436 VMX_VMCS64_GUEST_PKRS_HIGH,
437
438 /* 64-bit host-state fields. */
439 VMX_VMCS64_HOST_PAT_FULL,
440 VMX_VMCS64_HOST_PAT_HIGH,
441 VMX_VMCS64_HOST_EFER_FULL,
442 VMX_VMCS64_HOST_EFER_HIGH,
443 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
444 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
445 VMX_VMCS64_HOST_PKRS_FULL,
446 VMX_VMCS64_HOST_PKRS_HIGH,
447
448 /* 32-bit control fields. */
449 VMX_VMCS32_CTRL_PIN_EXEC,
450 VMX_VMCS32_CTRL_PROC_EXEC,
451 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
452 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
453 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
454 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
455 VMX_VMCS32_CTRL_EXIT,
456 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
457 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
458 VMX_VMCS32_CTRL_ENTRY,
459 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
461 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
462 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
463 VMX_VMCS32_CTRL_TPR_THRESHOLD,
464 VMX_VMCS32_CTRL_PROC_EXEC2,
465 VMX_VMCS32_CTRL_PLE_GAP,
466 VMX_VMCS32_CTRL_PLE_WINDOW,
467
468 /* 32-bit read-only data fields. */
469 VMX_VMCS32_RO_VM_INSTR_ERROR,
470 VMX_VMCS32_RO_EXIT_REASON,
471 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
472 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
473 VMX_VMCS32_RO_IDT_VECTORING_INFO,
474 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
475 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
476 VMX_VMCS32_RO_EXIT_INSTR_INFO,
477
478 /* 32-bit guest-state fields. */
479 VMX_VMCS32_GUEST_ES_LIMIT,
480 VMX_VMCS32_GUEST_CS_LIMIT,
481 VMX_VMCS32_GUEST_SS_LIMIT,
482 VMX_VMCS32_GUEST_DS_LIMIT,
483 VMX_VMCS32_GUEST_FS_LIMIT,
484 VMX_VMCS32_GUEST_GS_LIMIT,
485 VMX_VMCS32_GUEST_LDTR_LIMIT,
486 VMX_VMCS32_GUEST_TR_LIMIT,
487 VMX_VMCS32_GUEST_GDTR_LIMIT,
488 VMX_VMCS32_GUEST_IDTR_LIMIT,
489 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
490 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
491 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_INT_STATE,
498 VMX_VMCS32_GUEST_ACTIVITY_STATE,
499 VMX_VMCS32_GUEST_SMBASE,
500 VMX_VMCS32_GUEST_SYSENTER_CS,
501 VMX_VMCS32_PREEMPT_TIMER_VALUE,
502
503 /* 32-bit host-state fields. */
504 VMX_VMCS32_HOST_SYSENTER_CS,
505
506 /* Natural-width control fields. */
507 VMX_VMCS_CTRL_CR0_MASK,
508 VMX_VMCS_CTRL_CR4_MASK,
509 VMX_VMCS_CTRL_CR0_READ_SHADOW,
510 VMX_VMCS_CTRL_CR4_READ_SHADOW,
511 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
512 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
515
516 /* Natural-width read-only data fields. */
517 VMX_VMCS_RO_EXIT_QUALIFICATION,
518 VMX_VMCS_RO_IO_RCX,
519 VMX_VMCS_RO_IO_RSI,
520 VMX_VMCS_RO_IO_RDI,
521 VMX_VMCS_RO_IO_RIP,
522 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
523
524 /* Natural-width guest-state fields. */
525 VMX_VMCS_GUEST_CR0,
526 VMX_VMCS_GUEST_CR3,
527 VMX_VMCS_GUEST_CR4,
528 VMX_VMCS_GUEST_ES_BASE,
529 VMX_VMCS_GUEST_CS_BASE,
530 VMX_VMCS_GUEST_SS_BASE,
531 VMX_VMCS_GUEST_DS_BASE,
532 VMX_VMCS_GUEST_FS_BASE,
533 VMX_VMCS_GUEST_GS_BASE,
534 VMX_VMCS_GUEST_LDTR_BASE,
535 VMX_VMCS_GUEST_TR_BASE,
536 VMX_VMCS_GUEST_GDTR_BASE,
537 VMX_VMCS_GUEST_IDTR_BASE,
538 VMX_VMCS_GUEST_DR7,
539 VMX_VMCS_GUEST_RSP,
540 VMX_VMCS_GUEST_RIP,
541 VMX_VMCS_GUEST_RFLAGS,
542 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
543 VMX_VMCS_GUEST_SYSENTER_ESP,
544 VMX_VMCS_GUEST_SYSENTER_EIP,
545 VMX_VMCS_GUEST_S_CET,
546 VMX_VMCS_GUEST_SSP,
547 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
548
549 /* Natural-width host-state fields */
550 VMX_VMCS_HOST_CR0,
551 VMX_VMCS_HOST_CR3,
552 VMX_VMCS_HOST_CR4,
553 VMX_VMCS_HOST_FS_BASE,
554 VMX_VMCS_HOST_GS_BASE,
555 VMX_VMCS_HOST_TR_BASE,
556 VMX_VMCS_HOST_GDTR_BASE,
557 VMX_VMCS_HOST_IDTR_BASE,
558 VMX_VMCS_HOST_SYSENTER_ESP,
559 VMX_VMCS_HOST_SYSENTER_EIP,
560 VMX_VMCS_HOST_RSP,
561 VMX_VMCS_HOST_RIP,
562 VMX_VMCS_HOST_S_CET,
563 VMX_VMCS_HOST_SSP,
564 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
565};
566#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
567
568#ifdef VBOX_STRICT
569static const uint32_t g_aVmcsSegBase[] =
570{
571 VMX_VMCS_GUEST_ES_BASE,
572 VMX_VMCS_GUEST_CS_BASE,
573 VMX_VMCS_GUEST_SS_BASE,
574 VMX_VMCS_GUEST_DS_BASE,
575 VMX_VMCS_GUEST_FS_BASE,
576 VMX_VMCS_GUEST_GS_BASE
577};
578static const uint32_t g_aVmcsSegSel[] =
579{
580 VMX_VMCS16_GUEST_ES_SEL,
581 VMX_VMCS16_GUEST_CS_SEL,
582 VMX_VMCS16_GUEST_SS_SEL,
583 VMX_VMCS16_GUEST_DS_SEL,
584 VMX_VMCS16_GUEST_FS_SEL,
585 VMX_VMCS16_GUEST_GS_SEL
586};
587static const uint32_t g_aVmcsSegLimit[] =
588{
589 VMX_VMCS32_GUEST_ES_LIMIT,
590 VMX_VMCS32_GUEST_CS_LIMIT,
591 VMX_VMCS32_GUEST_SS_LIMIT,
592 VMX_VMCS32_GUEST_DS_LIMIT,
593 VMX_VMCS32_GUEST_FS_LIMIT,
594 VMX_VMCS32_GUEST_GS_LIMIT
595};
596static const uint32_t g_aVmcsSegAttr[] =
597{
598 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
599 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
600 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
601 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
604};
605AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
606AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
607AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
608AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
609#endif /* VBOX_STRICT */
610
611#ifdef HMVMX_USE_FUNCTION_TABLE
612/**
613 * VMX_EXIT dispatch table.
614 */
615static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
616{
617 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
618 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
619 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
620 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
621 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
622 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
623 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
624 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
625 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
626 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
627 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
628 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
629 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
630 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
631 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
632 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
633 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
634 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
635 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
637 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
638 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
639 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
640 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
641 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
642 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
643 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
644 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
645 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
646#else
647 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
648 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
649 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
650 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
651 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
652 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
653 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
654 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
655 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
658 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
659 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
660 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
661 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
662 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
663 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
664 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
665 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
666 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
667 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
668 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
669 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
670 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
671 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
672 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
673 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
674 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
675 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
676 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
677 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
678 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
679#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
680 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
681#else
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
683#endif
684 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
685 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
686#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
687 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
688#else
689 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
690#endif
691 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
692 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
693 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
694 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
695 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
696 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
697 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
698 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
699 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
700 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
701 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
702 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
703 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
704 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
705 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
706 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
707};
708#endif /* HMVMX_USE_FUNCTION_TABLE */
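/* Illustrative dispatch sketch (not part of the original source): with
 * HMVMX_USE_FUNCTION_TABLE defined, a VM-exit can be routed through the table above
 * roughly as follows, assuming uExitReason has already been read from the VMCS:
 * @code
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);   // out-of-range reason
 * @endcode
 */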
709
710#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
711static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
712{
713 /* 0 */ "(Not Used)",
714 /* 1 */ "VMCALL executed in VMX root operation.",
715 /* 2 */ "VMCLEAR with invalid physical address.",
716 /* 3 */ "VMCLEAR with VMXON pointer.",
717 /* 4 */ "VMLAUNCH with non-clear VMCS.",
718 /* 5 */ "VMRESUME with non-launched VMCS.",
719 /* 6 */ "VMRESUME after VMXOFF",
720 /* 7 */ "VM-entry with invalid control fields.",
721 /* 8 */ "VM-entry with invalid host state fields.",
722 /* 9 */ "VMPTRLD with invalid physical address.",
723 /* 10 */ "VMPTRLD with VMXON pointer.",
724 /* 11 */ "VMPTRLD with incorrect revision identifier.",
725 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
726 /* 13 */ "VMWRITE to read-only VMCS component.",
727 /* 14 */ "(Not Used)",
728 /* 15 */ "VMXON executed in VMX root operation.",
729 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
730 /* 17 */ "VM-entry with non-launched executing VMCS.",
731 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
732 /* 19 */ "VMCALL with non-clear VMCS.",
733 /* 20 */ "VMCALL with invalid VM-exit control fields.",
734 /* 21 */ "(Not Used)",
735 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
736 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
737 /* 24 */ "VMCALL with invalid SMM-monitor features.",
738 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
739 /* 26 */ "VM-entry with events blocked by MOV SS.",
740 /* 27 */ "(Not Used)",
741 /* 28 */ "Invalid operand to INVEPT/INVVPID."
742};
743#endif /* VBOX_STRICT && LOG_ENABLED */
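/* Illustrative lookup sketch (not part of the original source): in builds where the
 * table above is compiled (VBOX_STRICT with LOG_ENABLED), the VM-instruction error
 * read from the VMCS can be turned into a string with a bounds check against
 * HMVMX_INSTR_ERROR_MAX:
 * @code
 *     uint32_t uInstrError;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     const char *pszError = uInstrError <= HMVMX_INSTR_ERROR_MAX
 *                          ? g_apszVmxInstrErrors[uInstrError] : "Unknown";
 *     Log4(("VM-instruction error %u: %s\n", uInstrError, pszError));
 * @endcode
 */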
744
745
746/**
747 * Gets the CR0 guest/host mask.
748 *
749 * These bits typically do not change through the lifetime of a VM. Any bit set in
750 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
751 * by the guest.
752 *
753 * @returns The CR0 guest/host mask.
754 * @param pVCpu The cross context virtual CPU structure.
755 */
756static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
757{
758 /*
759 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
760 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
761 *
762 * Furthermore, modifications to any bits that are reserved/unspecified currently
763 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
764 * when future CPUs specify and use currently reserved/unspecified bits.
765 */
766 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
767 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
768 * and @bugref{6944}. */
769 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
770 return ( X86_CR0_PE
771 | X86_CR0_NE
772 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
773 | X86_CR0_PG
774 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
775}
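/* Conceptual sketch (not part of the original source): every bit set in the returned
 * mask is host-owned, so a guest MOV to CR0 that tries to give such a bit a new value
 * traps.  Very roughly, with uOldCr0/uNewCr0 as hypothetical guest CR0 values:
 * @code
 *     uint64_t const fCr0Mask   = vmxHCGetFixedCr0Mask(pVCpu);
 *     bool     const fWouldExit = RT_BOOL((uOldCr0 ^ uNewCr0) & fCr0Mask);
 * @endcode
 * The real hardware check compares the new value against the CR0 read shadow rather
 * than against the previous raw value.
 */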
776
777
778/**
779 * Gets the CR4 guest/host mask.
780 *
781 * These bits typically do not change through the lifetime of a VM. Any bit set in
782 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
783 * by the guest.
784 *
785 * @returns The CR4 guest/host mask.
786 * @param pVCpu The cross context virtual CPU structure.
787 */
788static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
789{
790 /*
791 * We construct a mask of all CR4 bits that the guest can modify without causing
792 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
793 * a VM-exit when the guest attempts to modify them when executing using
794 * hardware-assisted VMX.
795 *
796 * When a feature is not exposed to the guest (and may be present on the host),
797 * we want to intercept guest modifications to the bit so we can emulate proper
798 * behavior (e.g., #GP).
799 *
800 * Furthermore, only modifications to those bits that don't require immediate
801 * emulation are allowed. For example, PCIDE is excluded because the behavior
802 * depends on CR3 which might not always be the guest value while executing
803 * using hardware-assisted VMX.
804 */
805 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
806 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
807#ifdef IN_NEM_DARWIN
808 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
809#endif
810 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
811
812 /*
813 * Paranoia.
814 * Ensure features exposed to the guest are present on the host.
815 */
816 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
817#ifdef IN_NEM_DARWIN
818 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
819#endif
820 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
821
822 uint64_t const fGstMask = X86_CR4_PVI
823 | X86_CR4_TSD
824 | X86_CR4_DE
825 | X86_CR4_MCE
826 | X86_CR4_PCE
827 | X86_CR4_OSXMMEEXCPT
828 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
829#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
830 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
831 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
832#endif
833 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
834 return ~fGstMask;
835}
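/* Conceptual sketch (not part of the original source): the function builds the set of
 * CR4 bits the guest may own and returns its complement, so every bit not explicitly
 * granted to the guest is host-owned and intercepted.  The same pattern in isolation:
 * @code
 *     uint64_t const fGuestOwned = X86_CR4_DE | X86_CR4_TSD;   // example grants only
 *     uint64_t const fHostOwned  = ~fGuestOwned;               // everything else traps
 * @endcode
 */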
836
837
838/**
839 * Adds one or more exceptions to the exception bitmap and commits it to the current
840 * VMCS.
841 *
842 * @param pVCpu The cross context virtual CPU structure.
843 * @param pVmxTransient The VMX-transient structure.
844 * @param uXcptMask The exception(s) to add.
845 */
846static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
847{
848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
849 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
850 if ((uXcptBitmap & uXcptMask) != uXcptMask)
851 {
852 uXcptBitmap |= uXcptMask;
853 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
854 AssertRC(rc);
855 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
856 }
857}
858
859
860/**
861 * Adds an exception to the exception bitmap and commits it to the current VMCS.
862 *
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcpt The exception to add.
866 */
867static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
868{
869 Assert(uXcpt <= X86_XCPT_LAST);
870 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
871}
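/* Illustrative usage (not part of the original source): interception of a single
 * exception, e.g. #GP, can be enabled for the current VMCS like this:
 * @code
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 * @endcode
 * The VMCS write is skipped when the requested bits are already set in the cached
 * exception bitmap.
 */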
872
873
874/**
875 * Removes one or more exceptions from the exception bitmap and commits it to the
876 * current VMCS.
877 *
878 * This takes care of not removing the exception intercept if a nested-guest
879 * requires the exception to be intercepted.
880 *
881 * @returns VBox status code.
882 * @param pVCpu The cross context virtual CPU structure.
883 * @param pVmxTransient The VMX-transient structure.
884 * @param uXcptMask The exception(s) to remove.
885 */
886static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
887{
888 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
889 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
890 if (u32XcptBitmap & uXcptMask)
891 {
892#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
893 if (!pVmxTransient->fIsNestedGuest)
894 { /* likely */ }
895 else
896 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
897#endif
898#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
899 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
900 | RT_BIT(X86_XCPT_DE)
901 | RT_BIT(X86_XCPT_NM)
902 | RT_BIT(X86_XCPT_TS)
903 | RT_BIT(X86_XCPT_UD)
904 | RT_BIT(X86_XCPT_NP)
905 | RT_BIT(X86_XCPT_SS)
906 | RT_BIT(X86_XCPT_GP)
907 | RT_BIT(X86_XCPT_PF)
908 | RT_BIT(X86_XCPT_MF));
909#elif defined(HMVMX_ALWAYS_TRAP_PF)
910 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
911#endif
912 if (uXcptMask)
913 {
914 /* Validate we are not removing any essential exception intercepts. */
915#ifndef IN_NEM_DARWIN
916 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
917#else
918 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
919#endif
920 NOREF(pVCpu);
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
922 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
923
924 /* Remove it from the exception bitmap. */
925 u32XcptBitmap &= ~uXcptMask;
926
927 /* Commit and update the cache if necessary. */
928 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
929 {
930 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
931 AssertRC(rc);
932 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
933 }
934 }
935 }
936 return VINF_SUCCESS;
937}
938
939
940/**
941 * Removes an exception from the exception bitmap and commits it to the current
942 * VMCS.
943 *
944 * @returns VBox status code.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param pVmxTransient The VMX-transient structure.
947 * @param uXcpt The exception to remove.
948 */
949static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
950{
951 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
952}
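/* Illustrative usage (not part of the original source): dropping the #GP intercept
 * again; the request is narrowed so that exceptions a nested-guest still wants
 * intercepted (and the always-trapped ones in strict builds) stay in the bitmap:
 * @code
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 * @endcode
 */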
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
955
956/**
957 * Loads the shadow VMCS specified by the VMCS info. object.
958 *
959 * @returns VBox status code.
960 * @param pVmcsInfo The VMCS info. object.
961 *
962 * @remarks Can be called with interrupts disabled.
963 */
964static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
965{
966 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
967 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
968
969 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
970 if (RT_SUCCESS(rc))
971 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
972 return rc;
973}
974
975
976/**
977 * Clears the shadow VMCS specified by the VMCS info. object.
978 *
979 * @returns VBox status code.
980 * @param pVmcsInfo The VMCS info. object.
981 *
982 * @remarks Can be called with interrupts disabled.
983 */
984static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
985{
986 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
987 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
988
989 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
990 if (RT_SUCCESS(rc))
991 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
992 return rc;
993}
994
995
996/**
997 * Switches from and to the specified VMCSes.
998 *
999 * @returns VBox status code.
1000 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1001 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1002 *
1003 * @remarks Called with interrupts disabled.
1004 */
1005static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1006{
1007 /*
1008 * Clear the VMCS we are switching out if it has not already been cleared.
1009 * This will sync any CPU internal data back to the VMCS.
1010 */
1011 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1012 {
1013 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1014 if (RT_SUCCESS(rc))
1015 {
1016 /*
1017 * The shadow VMCS, if any, would not be active at this point since we
1018 * would have cleared it while importing the virtual hardware-virtualization
1019 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1020 * clear the shadow VMCS here, just assert for safety.
1021 */
1022 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1023 }
1024 else
1025 return rc;
1026 }
1027
1028 /*
1029 * Clear the VMCS we are switching to if it has not already been cleared.
1030 * This will initialize the VMCS launch state to "clear" required for loading it.
1031 *
1032 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1033 */
1034 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1035 {
1036 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1037 if (RT_SUCCESS(rc))
1038 { /* likely */ }
1039 else
1040 return rc;
1041 }
1042
1043 /*
1044 * Finally, load the VMCS we are switching to.
1045 */
1046 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1047}
1048
1049
1050/**
1051 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1052 * caller.
1053 *
1054 * @returns VBox status code.
1055 * @param pVCpu The cross context virtual CPU structure.
1056 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1057 * true) or guest VMCS (pass false).
1058 */
1059static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1060{
1061 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1062 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1063
1064 PVMXVMCSINFO pVmcsInfoFrom;
1065 PVMXVMCSINFO pVmcsInfoTo;
1066 if (fSwitchToNstGstVmcs)
1067 {
1068 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1069 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1070 }
1071 else
1072 {
1073 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1074 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1075 }
1076
1077 /*
1078 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1079 * preemption hook code path acquires the current VMCS.
1080 */
1081 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1082
1083 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1084 if (RT_SUCCESS(rc))
1085 {
1086 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1087 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1088
1089 /*
1090 * If we are switching to a VMCS that was executed on a different host CPU or was
1091 * never executed before, flag that we need to export the host state before executing
1092 * guest/nested-guest code using hardware-assisted VMX.
1093 *
1094 * This could probably be done in a preemptible context since the preemption hook
1095 * will flag the necessary change in host context. However, since preemption is
1096 * already disabled and to avoid making assumptions about host specific code in
1097 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1098 * disabled.
1099 */
1100 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1101 { /* likely */ }
1102 else
1103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1104
1105 ASMSetFlags(fEFlags);
1106
1107 /*
1108 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1109 * flag that we need to update the host MSR values there. Even if we decide in the
1110 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1111 * if its content differs, we would have to update the host MSRs anyway.
1112 */
1113 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1114 }
1115 else
1116 ASMSetFlags(fEFlags);
1117 return rc;
1118}
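/* Pattern sketch (not part of the original source): the save/disable/restore sequence
 * used above is the usual IPRT way of making a short section safe against preemption
 * and interrupts on the current CPU:
 * @code
 *     RTCCUINTREG const fEFlags = ASMIntDisableFlags();   // save RFLAGS.IF and clear it
 *     // ... touch per-CPU state, e.g. the current VMCS ...
 *     ASMSetFlags(fEFlags);                               // restore the previous state
 * @endcode
 */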
1119
1120#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1121#ifdef VBOX_STRICT
1122
1123/**
1124 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1125 * transient structure.
1126 *
1127 * @param pVCpu The cross context virtual CPU structure.
1128 * @param pVmxTransient The VMX-transient structure.
1129 */
1130DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1131{
1132 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1133 AssertRC(rc);
1134}
1135
1136
1137/**
1138 * Reads the VM-entry exception error code field from the VMCS into
1139 * the VMX transient structure.
1140 *
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1147 AssertRC(rc);
1148}
1149
1150
1151/**
1152 * Reads the VM-entry instruction length field from the VMCS into
1153 * the VMX transient structure.
1154 *
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param pVmxTransient The VMX-transient structure.
1157 */
1158DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1159{
1160 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1161 AssertRC(rc);
1162}
1163
1164#endif /* VBOX_STRICT */
1165
1166/**
1167 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1168 * transient structure.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure.
1171 * @param pVmxTransient The VMX-transient structure.
1172 */
1173DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1174{
1175 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1176 {
1177 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1178 AssertRC(rc);
1179 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1180 }
1181}
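/* Pattern sketch (not part of the original source): each of these vmxHCReadXxxVmcs
 * helpers follows the same read-once scheme -- a flag bit in
 * pVmxTransient->fVmcsFieldsRead records that a VMCS field is already cached, so
 * repeated calls during one VM-exit avoid redundant VMREADs.  Schematically:
 * @code
 *     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_XXX))      // not cached yet?
 *     {
 *         int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_XXX, &pVmxTransient->uXxx);
 *         AssertRC(rc);
 *         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_XXX;        // mark as cached
 *     }
 * @endcode
 * HMVMX_READ_XXX, VMX_VMCS32_RO_XXX and uXxx stand in for the concrete names used by
 * each helper.
 */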
1182
1183
1184/**
1185 * Reads the VM-exit interruption error code from the VMCS into the VMX
1186 * transient structure.
1187 *
1188 * @param pVCpu The cross context virtual CPU structure.
1189 * @param pVmxTransient The VMX-transient structure.
1190 */
1191DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1192{
1193 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1194 {
1195 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1196 AssertRC(rc);
1197 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1198 }
1199}
1200
1201
1202/**
1203 * Reads the VM-exit instruction length field from the VMCS into the VMX
1204 * transient structure.
1205 *
1206 * @param pVCpu The cross context virtual CPU structure.
1207 * @param pVmxTransient The VMX-transient structure.
1208 */
1209DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1210{
1211 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1212 {
1213 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1214 AssertRC(rc);
1215 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1216 }
1217}
1218
1219
1220/**
1221 * Reads the VM-exit instruction-information field from the VMCS into
1222 * the VMX transient structure.
1223 *
1224 * @param pVCpu The cross context virtual CPU structure.
1225 * @param pVmxTransient The VMX-transient structure.
1226 */
1227DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1228{
1229 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1230 {
1231 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1232 AssertRC(rc);
1233 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1234 }
1235}
1236
1237
1238/**
1239 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1240 *
1241 * @param pVCpu The cross context virtual CPU structure.
1242 * @param pVmxTransient The VMX-transient structure.
1243 */
1244DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1245{
1246 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1247 {
1248 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1249 AssertRC(rc);
1250 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1251 }
1252}
1253
1254
1255/**
1256 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1257 *
1258 * @param pVCpu The cross context virtual CPU structure.
1259 * @param pVmxTransient The VMX-transient structure.
1260 */
1261DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1262{
1263 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1264 {
1265 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1266 AssertRC(rc);
1267 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1268 }
1269}
1270
1271
1272/**
1273 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1274 *
1275 * @param pVCpu The cross context virtual CPU structure.
1276 * @param pVmxTransient The VMX-transient structure.
1277 */
1278DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1279{
1280 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1281 {
1282 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1283 AssertRC(rc);
1284 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1285 }
1286}
1287
1288#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1289/**
1290 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1291 * structure.
1292 *
1293 * @param pVCpu The cross context virtual CPU structure.
1294 * @param pVmxTransient The VMX-transient structure.
1295 */
1296DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1297{
1298 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1299 {
1300 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1301 AssertRC(rc);
1302 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1303 }
1304}
1305#endif
1306
1307/**
1308 * Reads the IDT-vectoring information field from the VMCS into the VMX
1309 * transient structure.
1310 *
1311 * @param pVCpu The cross context virtual CPU structure.
1312 * @param pVmxTransient The VMX-transient structure.
1313 *
1314 * @remarks No-long-jump zone!!!
1315 */
1316DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1317{
1318 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1319 {
1320 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1321 AssertRC(rc);
1322 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1323 }
1324}
1325
1326
1327/**
1328 * Reads the IDT-vectoring error code from the VMCS into the VMX
1329 * transient structure.
1330 *
1331 * @param pVCpu The cross context virtual CPU structure.
1332 * @param pVmxTransient The VMX-transient structure.
1333 */
1334DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1335{
1336 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1337 {
1338 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1341 }
1342}
1343
1344#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1345/**
1346 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1347 *
1348 * @param pVCpu The cross context virtual CPU structure.
1349 * @param pVmxTransient The VMX-transient structure.
1350 */
1351static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1352{
1353 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1354 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1355 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1356 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1358 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1359 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1360 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1361 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1362 AssertRC(rc);
1363 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1364 | HMVMX_READ_EXIT_INSTR_LEN
1365 | HMVMX_READ_EXIT_INSTR_INFO
1366 | HMVMX_READ_IDT_VECTORING_INFO
1367 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1368 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1369 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1370 | HMVMX_READ_GUEST_LINEAR_ADDR
1371 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1372}
1373#endif
1374
1375/**
1376 * Verifies that our cached values of the VMCS fields are all consistent with
1377 * what's actually present in the VMCS.
1378 *
1379 * @returns VBox status code.
1380 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1381 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1382 * VMCS content. HMCPU error-field is
1383 * updated, see VMX_VCI_XXX.
1384 * @param pVCpu The cross context virtual CPU structure.
1385 * @param pVmcsInfo The VMCS info. object.
1386 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1387 */
1388static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1389{
1390 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1391
1392 uint32_t u32Val;
1393 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1394 AssertRC(rc);
1395 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1396 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1397 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1398 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1399
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1403 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406
1407 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1408 AssertRC(rc);
1409 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1410 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1411 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1412 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1413
1414 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1415 AssertRC(rc);
1416 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1417 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1418 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1419 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1420
1421 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1422 {
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1426 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429 }
1430
1431 uint64_t u64Val;
1432 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1433 {
1434 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1435 AssertRC(rc);
1436 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1437 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1438 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1439 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1440 }
1441
1442 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1443 AssertRC(rc);
1444 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1445 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1446 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1447 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1448
1449 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1450 AssertRC(rc);
1451 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1452 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1453 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1454 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1455
1456 NOREF(pcszVmcs);
1457 return VINF_SUCCESS;
1458}
1459
1460
1461/**
1462 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1463 * VMCS.
1464 *
1465 * This is typically required when the guest changes paging mode.
1466 *
1467 * @returns VBox status code.
1468 * @param pVCpu The cross context virtual CPU structure.
1469 * @param pVmxTransient The VMX-transient structure.
1470 *
1471 * @remarks Requires EFER.
1472 * @remarks No-long-jump zone!!!
1473 */
1474static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1475{
1476 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1477 {
1478 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1479 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1480
1481 /*
1482 * VM-entry controls.
1483 */
1484 {
1485 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1486 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
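            /* allowed0 holds the bits the CPU requires to be 1, allowed1 the bits it permits to be 1;
               e.g. a control bit that is clear in allowed1 must stay clear in fVal, which is what the
               (fVal & fZap) == fVal check further down enforces. */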
1487
1488 /*
1489 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1490 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1491 *
1492 * For nested-guests, this is a mandatory VM-entry control. It's also
1493 * required because we do not want to leak host bits to the nested-guest.
1494 */
1495 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1496
1497 /*
1498 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1499 *
1500 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1501 * required to get the nested-guest working with hardware-assisted VMX execution.
1502 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1503 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1504 * here rather than while merging the guest VMCS controls.
1505 */
1506 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1507 {
1508 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1509 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1510 }
1511 else
1512 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1513
1514 /*
1515 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1516 *
1517 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1518 * regardless of whether the nested-guest VMCS specifies it because we are free to
1519 * load whatever MSRs we require and we do not need to modify the guest visible copy
1520 * of the VM-entry MSR load area.
1521 */
1522 if ( g_fHmVmxSupportsVmcsEfer
1523#ifndef IN_NEM_DARWIN
1524 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1525#endif
1526 )
1527 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1528 else
1529 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1530
1531 /*
1532 * The following should -not- be set (since we're not in SMM mode):
1533 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1534 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1535 */
1536
1537 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1538 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1539
1540 if ((fVal & fZap) == fVal)
1541 { /* likely */ }
1542 else
1543 {
1544 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1545 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1546 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1547 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1548 }
1549
1550 /* Commit it to the VMCS. */
1551 if (pVmcsInfo->u32EntryCtls != fVal)
1552 {
1553 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1554 AssertRC(rc);
1555 pVmcsInfo->u32EntryCtls = fVal;
1556 }
1557 }
1558
1559 /*
1560 * VM-exit controls.
1561 */
1562 {
1563 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1564 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1565
1566 /*
1567 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1568 * supported the 1-setting of this bit.
1569 *
1570 * For nested-guests, we set the "save debug controls" as the converse
1571 * "load debug controls" is mandatory for nested-guests anyway.
1572 */
1573 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1574
1575 /*
1576 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1577 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1578 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1579 * vmxHCExportHostMsrs().
1580 *
1581 * For nested-guests, we always set this bit as we do not support 32-bit
1582 * hosts.
1583 */
1584 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1585
1586#ifndef IN_NEM_DARWIN
1587 /*
1588 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1589 *
1590 * For nested-guests, we should use the "save IA32_EFER" control if we also
1591 * used the "load IA32_EFER" control while exporting VM-entry controls.
1592 */
1593 if ( g_fHmVmxSupportsVmcsEfer
1594 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1595 {
1596 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1597 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1598 }
1599#endif
1600
1601 /*
1602 * Enable saving of the VMX-preemption timer value on VM-exit.
1603 * For nested-guests, currently not exposed/used.
1604 */
1605 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1606 * the timer value. */
1607 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1608 {
1609 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1610 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1611 }
1612
1613 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1614 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1615
1616 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1617 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1618 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1619
1620 if ((fVal & fZap) == fVal)
1621 { /* likely */ }
1622 else
1623 {
1624 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1625 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1626 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1627 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1628 }
1629
1630 /* Commit it to the VMCS. */
1631 if (pVmcsInfo->u32ExitCtls != fVal)
1632 {
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1634 AssertRC(rc);
1635 pVmcsInfo->u32ExitCtls = fVal;
1636 }
1637 }
1638
1639 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1640 }
1641 return VINF_SUCCESS;
1642}
1643
1644
1645/**
1646 * Sets the TPR threshold in the VMCS.
1647 *
1648 * @param pVCpu The cross context virtual CPU structure.
1649 * @param pVmcsInfo The VMCS info. object.
1650 * @param u32TprThreshold The TPR threshold (task-priority class only).
1651 */
1652DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1653{
1654 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1655 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1656 RT_NOREF(pVmcsInfo);
1657 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1658 AssertRC(rc);
1659}
1660
1661
1662/**
1663 * Exports the guest APIC TPR state into the VMCS.
1664 *
1665 * @param pVCpu The cross context virtual CPU structure.
1666 * @param pVmxTransient The VMX-transient structure.
1667 *
1668 * @remarks No-long-jump zone!!!
1669 */
1670static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1671{
1672 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1673 {
1674 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1675
1676 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1677 if (!pVmxTransient->fIsNestedGuest)
1678 {
1679 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1680 && APICIsEnabled(pVCpu))
1681 {
1682 /*
1683 * Setup TPR shadowing.
1684 */
1685 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1686 {
1687 bool fPendingIntr = false;
1688 uint8_t u8Tpr = 0;
1689 uint8_t u8PendingIntr = 0;
1690 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1691 AssertRC(rc);
1692
1693 /*
1694 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1695 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1696 * priority of the pending interrupt so we can deliver the interrupt. If there
1697 * are no interrupts pending, set threshold to 0 to not cause any
1698 * TPR-below-threshold VM-exits.
1699 */
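                    /* Illustrative example: pending vector 0x61 -> class 6, guest TPR 0x80 -> class 8;
                       since 6 <= 8 the threshold becomes 6 and VT-x raises a TPR-below-threshold
                       VM-exit once the guest lowers its TPR below 0x60. */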
1700 uint32_t u32TprThreshold = 0;
1701 if (fPendingIntr)
1702 {
1703 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1704 (which is the Task-Priority Class). */
1705 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1706 const uint8_t u8TprPriority = u8Tpr >> 4;
1707 if (u8PendingPriority <= u8TprPriority)
1708 u32TprThreshold = u8PendingPriority;
1709 }
1710
1711 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1712 }
1713 }
1714 }
1715 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1716 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1717 }
1718}
1719
1720
1721/**
1722 * Gets the guest interruptibility-state and updates related force-flags.
1723 *
1724 * @returns Guest's interruptibility-state.
1725 * @param pVCpu The cross context virtual CPU structure.
1726 *
1727 * @remarks No-long-jump zone!!!
1728 */
1729static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1730{
1731 /*
1732 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1733 */
1734 uint32_t fIntrState = 0;
1735 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1736 {
1737 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1738 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1739
1740 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
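        /* Only the inhibiting RIP is recorded, not the instruction: if RIP still matches, IF set
           means STI-style blocking and IF clear means MOV SS/POP SS-style blocking (the only kind
           possible while IF is clear); if RIP has moved on, the inhibition has lapsed and the
           force-flag is cleared below. */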
1741 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1742 {
1743 if (pCtx->eflags.Bits.u1IF)
1744 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1745 else
1746 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1747 }
1748 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1749 {
1750 /*
1751 * We can clear the inhibit force flag as even if we go back to the recompiler
1752 * without executing guest code in VT-x, the flag's condition to be cleared is
1753 * met and thus the cleared state is correct.
1754 */
1755 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1756 }
1757 }
1758
1759 /*
1760 * Check if we should inhibit NMI delivery.
1761 */
1762 if (CPUMIsGuestNmiBlocking(pVCpu))
1763 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1764
1765 /*
1766 * Validate.
1767 */
1768#ifdef VBOX_STRICT
1769 /* We don't support block-by-SMI yet.*/
1770 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1771
1772 /* Block-by-STI must not be set when interrupts are disabled. */
1773 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1774 {
1775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1776 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1777 }
1778#endif
1779
1780 return fIntrState;
1781}
1782
1783
1784/**
1785 * Exports the exception intercepts required for guest execution in the VMCS.
1786 *
1787 * @param pVCpu The cross context virtual CPU structure.
1788 * @param pVmxTransient The VMX-transient structure.
1789 *
1790 * @remarks No-long-jump zone!!!
1791 */
1792static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1793{
1794 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1795 {
1796 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1797 if ( !pVmxTransient->fIsNestedGuest
1798 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1799 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1800 else
1801 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1802
1803 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1804 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1805 }
1806}
1807
1808
1809/**
1810 * Exports the guest's RIP into the guest-state area in the VMCS.
1811 *
1812 * @param pVCpu The cross context virtual CPU structure.
1813 *
1814 * @remarks No-long-jump zone!!!
1815 */
1816static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1817{
1818 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1819 {
1820 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1821
1822 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1823 AssertRC(rc);
1824
1825 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1826 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1827 }
1828}
1829
1830
1831/**
1832 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1833 *
1834 * @param pVCpu The cross context virtual CPU structure.
1835 * @param pVmxTransient The VMX-transient structure.
1836 *
1837 * @remarks No-long-jump zone!!!
1838 */
1839static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1840{
1841 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1842 {
1843 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1844
1845 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1846 Let us assert it as such and use 32-bit VMWRITE. */
1847 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1848 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1849 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1850 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1851
1852#ifndef IN_NEM_DARWIN
1853 /*
1854 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1855 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1856 * can run the real-mode guest code under Virtual 8086 mode.
1857 */
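        /* Illustrative example: real-mode eflags 0x0202 are saved unmodified below, while the value
           handed to VT-x gets VM (bit 17) set and IOPL forced to 0 so that IOPL-sensitive
           instructions (CLI, STI, PUSHF, POPF, INT n, IRET) fault and can be handled for the guest. */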
1858 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1859 if (pVmcsInfo->RealMode.fRealOnV86Active)
1860 {
1861 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1862 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1863 Assert(!pVmxTransient->fIsNestedGuest);
1864 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1865 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1866 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1867 }
1868#else
1869 RT_NOREF(pVmxTransient);
1870#endif
1871
1872 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1873 AssertRC(rc);
1874
1875 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1876 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1877 }
1878}
1879
1880
1881#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1882/**
1883 * Copies the nested-guest VMCS to the shadow VMCS.
1884 *
1885 * @returns VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure.
1887 * @param pVmcsInfo The VMCS info. object.
1888 *
1889 * @remarks No-long-jump zone!!!
1890 */
1891static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1892{
1893 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1894 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1895
1896 /*
1897 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1898 * current VMCS, as we may try saving guest lazy MSRs.
1899 *
1900 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1901 * calling the VMCS import code, which currently performs the guest MSR reads
1902 * (on 64-bit hosts), accesses the auto-load/store MSR area (on 32-bit hosts)
1903 * and runs the rest of the VMX leave-session machinery.
1904 */
1905 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1906
1907 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1908 if (RT_SUCCESS(rc))
1909 {
1910 /*
1911 * Copy all guest read/write VMCS fields.
1912 *
1913 * We don't check for VMWRITE failures here for performance reasons and
1914 * because they are not expected to fail, barring irrecoverable conditions
1915 * like hardware errors.
1916 */
1917 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1918 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1919 {
1920 uint64_t u64Val;
1921 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1922 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1923 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1924 }
1925
1926 /*
1927 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1928 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1929 */
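        /* The read-only fields are the VM-exit information fields (exit reason, exit qualification,
           VM-instruction error, IDT-vectoring info, etc.); without VMX_MISC_VMWRITE_ALL the CPU
           rejects VMWRITEs to them. */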
1930 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1931 {
1932 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1933 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1934 {
1935 uint64_t u64Val;
1936 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1937 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1938 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1939 }
1940 }
1941
1942 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1943 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1944 }
1945
1946 ASMSetFlags(fEFlags);
1947 return rc;
1948}
1949
1950
1951/**
1952 * Copies the shadow VMCS to the nested-guest VMCS.
1953 *
1954 * @returns VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure.
1956 * @param pVmcsInfo The VMCS info. object.
1957 *
1958 * @remarks Called with interrupts disabled.
1959 */
1960static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1961{
1962 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1963 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1964 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1965
1966 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1967 if (RT_SUCCESS(rc))
1968 {
1969 /*
1970 * Copy guest read/write fields from the shadow VMCS.
1971 * Guest read-only fields cannot be modified, so no need to copy them.
1972 *
1973 * We don't check for VMREAD failures here for performance reasons and
1974 * because they are not expected to fail, barring irrecoverable conditions
1975 * like hardware errors.
1976 */
1977 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1978 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1979 {
1980 uint64_t u64Val;
1981 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1982 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1983 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1984 }
1985
1986 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1987 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1988 }
1989 return rc;
1990}
1991
1992
1993/**
1994 * Enables VMCS shadowing for the given VMCS info. object.
1995 *
1996 * @param pVCpu The cross context virtual CPU structure.
1997 * @param pVmcsInfo The VMCS info. object.
1998 *
1999 * @remarks No-long-jump zone!!!
2000 */
2001static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2002{
2003 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2004 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2005 {
2006 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2007 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2008 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2009 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2010 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2011 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2012 Log4Func(("Enabled\n"));
2013 }
2014}
2015
2016
2017/**
2018 * Disables VMCS shadowing for the given VMCS info. object.
2019 *
2020 * @param pVCpu The cross context virtual CPU structure.
2021 * @param pVmcsInfo The VMCS info. object.
2022 *
2023 * @remarks No-long-jump zone!!!
2024 */
2025static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2026{
2027 /*
2028 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2029 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2030 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2031 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2032 *
2033 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2034 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2035 */
2036 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2037 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2038 {
2039 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2040 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2041 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2042 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2043 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2044 Log4Func(("Disabled\n"));
2045 }
2046}
2047#endif
2048
2049
2050/**
2051 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2052 *
2053 * The guest FPU state is always pre-loaded hence we don't need to bother about
2054 * sharing FPU related CR0 bits between the guest and host.
2055 *
2056 * @returns VBox status code.
2057 * @param pVCpu The cross context virtual CPU structure.
2058 * @param pVmxTransient The VMX-transient structure.
2059 *
2060 * @remarks No-long-jump zone!!!
2061 */
2062static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2063{
2064 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2065 {
2066 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2067 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2068
2069 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2070 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2071 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2072 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2073 else
2074 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
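        /* On typical hardware (not guaranteed) Cr0Fixed0 is 0x80000021 (PG, NE, PE) and Cr0Fixed1 is
           0xffffffff, i.e. only PG/NE/PE are forced to 1 and no bit is forced to 0; unrestricted
           guest execution relaxes the PE/PG requirement, hence the masking above. */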
2075
2076 if (!pVmxTransient->fIsNestedGuest)
2077 {
2078 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2079 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2080 uint64_t const u64ShadowCr0 = u64GuestCr0;
2081 Assert(!RT_HI_U32(u64GuestCr0));
2082
2083 /*
2084 * Setup VT-x's view of the guest CR0.
2085 */
2086 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2087 if (VM_IS_VMX_NESTED_PAGING(pVM))
2088 {
2089#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2090 if (CPUMIsGuestPagingEnabled(pVCpu))
2091 {
2092 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2093 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2094 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2095 }
2096 else
2097 {
2098 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2099 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2100 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2101 }
2102
2103 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2104 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2105 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2106#endif
2107 }
2108 else
2109 {
2110 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2111 u64GuestCr0 |= X86_CR0_WP;
2112 }
2113
2114 /*
2115 * Guest FPU bits.
2116 *
2117 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2118 * using CR0.TS.
2119 *
2120 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
2121 * set on the first CPUs to support VT-x, and makes no mention of it with regards to UX in the VM-entry checks.
2122 */
2123 u64GuestCr0 |= X86_CR0_NE;
2124
2125 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2126 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2127
2128 /*
2129 * Update exception intercepts.
2130 */
2131 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2132#ifndef IN_NEM_DARWIN
2133 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2134 {
2135 Assert(PDMVmmDevHeapIsEnabled(pVM));
2136 Assert(pVM->hm.s.vmx.pRealModeTSS);
2137 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2138 }
2139 else
2140#endif
2141 {
2142 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2143 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2144 if (fInterceptMF)
2145 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2146 }
2147
2148 /* Additional intercepts for debugging, define these yourself explicitly. */
2149#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2150 uXcptBitmap |= 0
2151 | RT_BIT(X86_XCPT_BP)
2152 | RT_BIT(X86_XCPT_DE)
2153 | RT_BIT(X86_XCPT_NM)
2154 | RT_BIT(X86_XCPT_TS)
2155 | RT_BIT(X86_XCPT_UD)
2156 | RT_BIT(X86_XCPT_NP)
2157 | RT_BIT(X86_XCPT_SS)
2158 | RT_BIT(X86_XCPT_GP)
2159 | RT_BIT(X86_XCPT_PF)
2160 | RT_BIT(X86_XCPT_MF)
2161 ;
2162#elif defined(HMVMX_ALWAYS_TRAP_PF)
2163 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2164#endif
2165 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2166 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2167 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2168 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2169 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2170
2171 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2172 u64GuestCr0 |= fSetCr0;
2173 u64GuestCr0 &= fZapCr0;
2174 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2175
2176 /* Commit the CR0 and related fields to the guest VMCS. */
2177 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2178 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2179 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2180 {
2181 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2182 AssertRC(rc);
2183 }
2184 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2185 {
2186 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2187 AssertRC(rc);
2188 }
2189
2190 /* Update our caches. */
2191 pVmcsInfo->u32ProcCtls = uProcCtls;
2192 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2193
2194 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2195 }
2196 else
2197 {
2198 /*
2199 * With nested-guests, we may have extended the guest/host mask here since we
2200 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2201 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2202 * originally supplied. We must copy those bits from the nested-guest CR0 into
2203 * the nested-guest CR0 read-shadow.
2204 */
2205 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2206 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2207 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2208 Assert(!RT_HI_U32(u64GuestCr0));
2209 Assert(u64GuestCr0 & X86_CR0_NE);
2210
2211 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2212 u64GuestCr0 |= fSetCr0;
2213 u64GuestCr0 &= fZapCr0;
2214 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2215
2216 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2217 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2218 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2219
2220 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2221 }
2222
2223 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2224 }
2225
2226 return VINF_SUCCESS;
2227}
2228
2229
2230/**
2231 * Exports the guest control registers (CR3, CR4) into the guest-state area
2232 * in the VMCS.
2233 *
2234 * @returns VBox strict status code.
2235 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2236 * without unrestricted guest access and the VMMDev is not presently
2237 * mapped (e.g. EFI32).
2238 *
2239 * @param pVCpu The cross context virtual CPU structure.
2240 * @param pVmxTransient The VMX-transient structure.
2241 *
2242 * @remarks No-long-jump zone!!!
2243 */
2244static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2245{
2246 int rc = VINF_SUCCESS;
2247 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2248
2249 /*
2250 * Guest CR2.
2251 * It's always loaded in the assembler code. Nothing to do here.
2252 */
2253
2254 /*
2255 * Guest CR3.
2256 */
2257 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2258 {
2259 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2260
2261 if (VM_IS_VMX_NESTED_PAGING(pVM))
2262 {
2263#ifndef IN_NEM_DARWIN
2264 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2265 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2266
2267 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2268 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2269 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2270 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2271
2272 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2273 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2274 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2275
2276 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2277 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2278 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2279 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2280 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2281 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2282 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2283
2284 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2285 AssertRC(rc);
2286#endif
2287
2288 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2289 uint64_t u64GuestCr3 = pCtx->cr3;
2290 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2291 || CPUMIsGuestPagingEnabledEx(pCtx))
2292 {
2293 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2294 if (CPUMIsGuestInPAEModeEx(pCtx))
2295 {
2296 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2297 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2298 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2299 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2300 }
2301
2302 /*
2303 * The guest's view of its CR3 is unblemished with nested paging when the
2304 * guest is using paging or we have unrestricted guest execution to handle
2305 * the guest when it's not using paging.
2306 */
2307 }
2308#ifndef IN_NEM_DARWIN
2309 else
2310 {
2311 /*
2312 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2313 * thinks it accesses physical memory directly, we use our identity-mapped
2314 * page table to map guest-linear to guest-physical addresses. EPT takes care
2315 * of translating it to host-physical addresses.
2316 */
2317 RTGCPHYS GCPhys;
2318 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2319
2320 /* We obtain it here every time as the guest could have relocated this PCI region. */
2321 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2322 if (RT_SUCCESS(rc))
2323 { /* likely */ }
2324 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2325 {
2326 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2327 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2328 }
2329 else
2330 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2331
2332 u64GuestCr3 = GCPhys;
2333 }
2334#endif
2335
2336 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2337 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2338 AssertRC(rc);
2339 }
2340 else
2341 {
2342 Assert(!pVmxTransient->fIsNestedGuest);
2343 /* Non-nested paging case, just use the hypervisor's CR3. */
2344 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2345
2346 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2347 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2348 AssertRC(rc);
2349 }
2350
2351 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2352 }
2353
2354 /*
2355 * Guest CR4.
2356 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2357 */
2358 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2359 {
2360 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2361 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2362
2363 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2364 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2365
2366 /*
2367 * With nested-guests, we may have extended the guest/host mask here (since we
2368 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2369 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2370 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2371 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2372 */
2373 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2374 uint64_t u64GuestCr4 = pCtx->cr4;
2375 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2376 ? pCtx->cr4
2377 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2378 Assert(!RT_HI_U32(u64GuestCr4));
2379
2380#ifndef IN_NEM_DARWIN
2381 /*
2382 * Setup VT-x's view of the guest CR4.
2383 *
2384 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2385 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2386 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2387 *
2388 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2389 */
2390 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2391 {
2392 Assert(pVM->hm.s.vmx.pRealModeTSS);
2393 Assert(PDMVmmDevHeapIsEnabled(pVM));
2394 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2395 }
2396#endif
2397
2398 if (VM_IS_VMX_NESTED_PAGING(pVM))
2399 {
2400 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2401 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2402 {
2403 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2404 u64GuestCr4 |= X86_CR4_PSE;
2405 /* Our identity mapping is a 32-bit page directory. */
2406 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2407 }
2408 /* else use guest CR4.*/
2409 }
2410 else
2411 {
2412 Assert(!pVmxTransient->fIsNestedGuest);
2413
2414 /*
2415 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2416 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2417 */
2418 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2419 {
2420 case PGMMODE_REAL: /* Real-mode. */
2421 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2422 case PGMMODE_32_BIT: /* 32-bit paging. */
2423 {
2424 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2425 break;
2426 }
2427
2428 case PGMMODE_PAE: /* PAE paging. */
2429 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2430 {
2431 u64GuestCr4 |= X86_CR4_PAE;
2432 break;
2433 }
2434
2435 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2436 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2437 {
2438#ifdef VBOX_WITH_64_BITS_GUESTS
2439 /* For our assumption in vmxHCShouldSwapEferMsr. */
2440 Assert(u64GuestCr4 & X86_CR4_PAE);
2441 break;
2442#endif
2443 }
2444 default:
2445 AssertFailed();
2446 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2447 }
2448 }
2449
2450 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2451 u64GuestCr4 |= fSetCr4;
2452 u64GuestCr4 &= fZapCr4;
2453
2454 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2455 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2456 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2457
2458#ifndef IN_NEM_DARWIN
2459 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2460 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2461 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2462 {
2463 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2464 hmR0VmxUpdateStartVmFunction(pVCpu);
2465 }
2466#endif
2467
2468 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2469
2470 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2471 }
2472 return rc;
2473}
2474
2475
2476#ifdef VBOX_STRICT
2477/**
2478 * Strict function to validate segment registers.
2479 *
2480 * @param pVCpu The cross context virtual CPU structure.
2481 * @param pVmcsInfo The VMCS info. object.
2482 *
2483 * @remarks Will import guest CR0 on strict builds during validation of
2484 * segments.
2485 */
2486static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2487{
2488 /*
2489 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2490 *
2491 * The reason we check for an attribute value of 0 in this function and not just the unusable bit is
2492 * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2493 * unusable bit and doesn't change the guest-context value.
2494 */
2495 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2496 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2497 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2498 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2499 && ( !CPUMIsGuestInRealModeEx(pCtx)
2500 && !CPUMIsGuestInV86ModeEx(pCtx)))
2501 {
2502 /* Protected mode checks */
2503 /* CS */
2504 Assert(pCtx->cs.Attr.n.u1Present);
2505 Assert(!(pCtx->cs.Attr.u & 0xf00));
2506 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2507 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2508 || !(pCtx->cs.Attr.n.u1Granularity));
2509 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2510 || (pCtx->cs.Attr.n.u1Granularity));
2511 /* CS cannot be loaded with NULL in protected mode. */
2512 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2513 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2514 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2515 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2516 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2517 else
2518 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2519 /* SS */
2520 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2521 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2522 if ( !(pCtx->cr0 & X86_CR0_PE)
2523 || pCtx->cs.Attr.n.u4Type == 3)
2524 {
2525 Assert(!pCtx->ss.Attr.n.u2Dpl);
2526 }
2527 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2528 {
2529 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2530 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2531 Assert(pCtx->ss.Attr.n.u1Present);
2532 Assert(!(pCtx->ss.Attr.u & 0xf00));
2533 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2534 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2535 || !(pCtx->ss.Attr.n.u1Granularity));
2536 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2537 || (pCtx->ss.Attr.n.u1Granularity));
2538 }
2539 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2540 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2541 {
2542 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2543 Assert(pCtx->ds.Attr.n.u1Present);
2544 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2545 Assert(!(pCtx->ds.Attr.u & 0xf00));
2546 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2547 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2548 || !(pCtx->ds.Attr.n.u1Granularity));
2549 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2550 || (pCtx->ds.Attr.n.u1Granularity));
2551 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2552 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2553 }
2554 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2555 {
2556 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2557 Assert(pCtx->es.Attr.n.u1Present);
2558 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2559 Assert(!(pCtx->es.Attr.u & 0xf00));
2560 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2561 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2562 || !(pCtx->es.Attr.n.u1Granularity));
2563 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2564 || (pCtx->es.Attr.n.u1Granularity));
2565 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2566 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2567 }
2568 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2569 {
2570 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2571 Assert(pCtx->fs.Attr.n.u1Present);
2572 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2573 Assert(!(pCtx->fs.Attr.u & 0xf00));
2574 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2575 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2576 || !(pCtx->fs.Attr.n.u1Granularity));
2577 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2578 || (pCtx->fs.Attr.n.u1Granularity));
2579 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2580 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2581 }
2582 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2583 {
2584 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2585 Assert(pCtx->gs.Attr.n.u1Present);
2586 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2587 Assert(!(pCtx->gs.Attr.u & 0xf00));
2588 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2589 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2590 || !(pCtx->gs.Attr.n.u1Granularity));
2591 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2592 || (pCtx->gs.Attr.n.u1Granularity));
2593 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2594 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2595 }
2596 /* 64-bit capable CPUs. */
2597 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2598 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2599 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2600 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2601 }
2602 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2603 || ( CPUMIsGuestInRealModeEx(pCtx)
2604 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2605 {
2606 /* Real and v86 mode checks. */
2607 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2608 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2609#ifndef IN_NEM_DARWIN
2610 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2611 {
2612 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2613 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2614 }
2615 else
2616#endif
2617 {
2618 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2619 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2620 }
2621
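            /* In real and v86 mode each segment base must equal the selector shifted left by 4
               (e.g. selector 0x1234 -> base 0x12340), with a 64 KiB limit and access rights 0xf3
               (present, DPL 3, read/write data, accessed). */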
2622 /* CS */
2623 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2624 Assert(pCtx->cs.u32Limit == 0xffff);
2625 Assert(u32CSAttr == 0xf3);
2626 /* SS */
2627 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2628 Assert(pCtx->ss.u32Limit == 0xffff);
2629 Assert(u32SSAttr == 0xf3);
2630 /* DS */
2631 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2632 Assert(pCtx->ds.u32Limit == 0xffff);
2633 Assert(u32DSAttr == 0xf3);
2634 /* ES */
2635 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2636 Assert(pCtx->es.u32Limit == 0xffff);
2637 Assert(u32ESAttr == 0xf3);
2638 /* FS */
2639 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2640 Assert(pCtx->fs.u32Limit == 0xffff);
2641 Assert(u32FSAttr == 0xf3);
2642 /* GS */
2643 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2644 Assert(pCtx->gs.u32Limit == 0xffff);
2645 Assert(u32GSAttr == 0xf3);
2646 /* 64-bit capable CPUs. */
2647 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2648 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2649 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2650 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2651 }
2652}
2653#endif /* VBOX_STRICT */
2654
2655
2656/**
2657 * Exports a guest segment register into the guest-state area in the VMCS.
2658 *
2659 * @returns VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure.
2661 * @param pVmcsInfo The VMCS info. object.
2662 * @param iSegReg The segment register number (X86_SREG_XXX).
2663 * @param pSelReg Pointer to the segment selector.
2664 *
2665 * @remarks No-long-jump zone!!!
2666 */
2667static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2668{
2669 Assert(iSegReg < X86_SREG_COUNT);
2670
2671 uint32_t u32Access = pSelReg->Attr.u;
2672#ifndef IN_NEM_DARWIN
2673 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2674#endif
2675 {
2676 /*
2677 * The way to differentiate between whether this is really a null selector or was just
2678 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2679 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2680 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2681 * that NULL selectors loaded in protected-mode have their attribute as 0.
2682 */
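        /* E.g. a selector of 0 loaded in real mode keeps non-zero attributes and remains usable,
           whereas a true protected-mode null selector arrives here with Attr.u == 0 and is marked
           X86DESCATTR_UNUSABLE below. */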
2683 if (u32Access)
2684 { }
2685 else
2686 u32Access = X86DESCATTR_UNUSABLE;
2687 }
2688#ifndef IN_NEM_DARWIN
2689 else
2690 {
2691 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2692 u32Access = 0xf3;
2693 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2694 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2695 RT_NOREF_PV(pVCpu);
2696 }
2697#else
2698 RT_NOREF(pVmcsInfo);
2699#endif
2700
2701 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2702 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2703 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2704
2705 /*
2706 * Commit it to the VMCS.
2707 */
2708 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2709 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2710 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2711 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2712 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2713 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2714 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2715 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2716 return VINF_SUCCESS;
2717}
2718
2719
2720/**
2721 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2722 * area in the VMCS.
2723 *
2724 * @returns VBox status code.
2725 * @param pVCpu The cross context virtual CPU structure.
2726 * @param pVmxTransient The VMX-transient structure.
2727 *
2728 * @remarks Will import guest CR0 on strict builds during validation of
2729 * segments.
2730 * @remarks No-long-jump zone!!!
2731 */
2732static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2733{
2734 int rc = VERR_INTERNAL_ERROR_5;
2735#ifndef IN_NEM_DARWIN
2736 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2737#endif
2738 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2739 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2740#ifndef IN_NEM_DARWIN
2741 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2742#endif
2743
2744 /*
2745 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2746 */
2747 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2748 {
2749 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2750 {
2751 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2752#ifndef IN_NEM_DARWIN
2753 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2754 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2755#endif
2756 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2757 AssertRC(rc);
2758 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2759 }
2760
2761 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2762 {
2763 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2764#ifndef IN_NEM_DARWIN
2765 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2766 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2767#endif
2768 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2769 AssertRC(rc);
2770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2771 }
2772
2773 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2774 {
2775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2776#ifndef IN_NEM_DARWIN
2777 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2778 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2779#endif
2780 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2781 AssertRC(rc);
2782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2783 }
2784
2785 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2786 {
2787 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2788#ifndef IN_NEM_DARWIN
2789 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2790 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2791#endif
2792 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2793 AssertRC(rc);
2794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2795 }
2796
2797 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2798 {
2799 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2800#ifndef IN_NEM_DARWIN
2801 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2802 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2803#endif
2804 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2805 AssertRC(rc);
2806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2807 }
2808
2809 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2810 {
2811 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2812#ifndef IN_NEM_DARWIN
2813 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2814 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2815#endif
2816 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2817 AssertRC(rc);
2818 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2819 }
2820
2821#ifdef VBOX_STRICT
2822 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2823#endif
2824 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2825 pCtx->cs.Attr.u));
2826 }
2827
2828 /*
2829 * Guest TR.
2830 */
2831 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2832 {
2833 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2834
2835 /*
2836 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2837 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2838 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2839 */
2840 uint16_t u16Sel;
2841 uint32_t u32Limit;
2842 uint64_t u64Base;
2843 uint32_t u32AccessRights;
2844#ifndef IN_NEM_DARWIN
2845 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2846#endif
2847 {
2848 u16Sel = pCtx->tr.Sel;
2849 u32Limit = pCtx->tr.u32Limit;
2850 u64Base = pCtx->tr.u64Base;
2851 u32AccessRights = pCtx->tr.Attr.u;
2852 }
2853#ifndef IN_NEM_DARWIN
2854 else
2855 {
2856 Assert(!pVmxTransient->fIsNestedGuest);
2857 Assert(pVM->hm.s.vmx.pRealModeTSS);
2858 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2859
2860 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2861 RTGCPHYS GCPhys;
2862 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2863 AssertRCReturn(rc, rc);
2864
2865 X86DESCATTR DescAttr;
2866 DescAttr.u = 0;
2867 DescAttr.n.u1Present = 1;
2868 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2869
2870 u16Sel = 0;
2871 u32Limit = HM_VTX_TSS_SIZE;
2872 u64Base = GCPhys;
2873 u32AccessRights = DescAttr.u;
2874 }
2875#endif
2876
2877 /* Validate. */
2878 Assert(!(u16Sel & RT_BIT(2)));
2879 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2880 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2881 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2882 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2883 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2884 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2885 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2886 Assert( (u32Limit & 0xfff) == 0xfff
2887 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2888 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2889 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2890
2891 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2892 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2893 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2895
2896 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2897 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2898 }
2899
2900 /*
2901 * Guest GDTR.
2902 */
2903 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2904 {
2905 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2906
2907 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2908 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2909
2910 /* Validate. */
2911 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2914 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2915 }
2916
2917 /*
2918 * Guest LDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2923
2924 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2925 uint32_t u32Access;
2926 if ( !pVmxTransient->fIsNestedGuest
2927 && !pCtx->ldtr.Attr.u)
2928 u32Access = X86DESCATTR_UNUSABLE;
2929 else
2930 u32Access = pCtx->ldtr.Attr.u;
2931
2932 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2935 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2936
2937 /* Validate. */
2938 if (!(u32Access & X86DESCATTR_UNUSABLE))
2939 {
2940 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2941 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2942 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2943 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2944 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2945 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2946 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2947 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2948 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2949 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2950 }
2951
2952 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2953 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2954 }
2955
2956 /*
2957 * Guest IDTR.
2958 */
2959 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2960 {
2961 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2962
2963 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2964 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2965
2966 /* Validate. */
2967 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2968
2969 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2970 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2971 }
2972
2973 return VINF_SUCCESS;
2974}
2975
2976
2977/**
2978 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2979 * VM-exit interruption info type.
2980 *
2981 * @returns The IEM exception flags.
2982 * @param uVector The event vector.
2983 * @param uVmxEventType The VMX event type.
2984 *
2985 * @remarks This function currently only constructs flags required for
2986 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2987 * and CR2 aspects of an exception are not included).
2988 */
2989static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2990{
2991 uint32_t fIemXcptFlags;
2992 switch (uVmxEventType)
2993 {
2994 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2995 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2996 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2997 break;
2998
2999 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3000 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3001 break;
3002
3003 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3004 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3005 break;
3006
3007 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3008 {
3009 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3010 if (uVector == X86_XCPT_BP)
3011 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3012 else if (uVector == X86_XCPT_OF)
3013 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3014 else
3015 {
3016 fIemXcptFlags = 0;
3017 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3018 }
3019 break;
3020 }
3021
3022 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3023 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3024 break;
3025
3026 default:
3027 fIemXcptFlags = 0;
3028 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3029 break;
3030 }
3031 return fIemXcptFlags;
3032}
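
/*
 * Illustrative example (not part of the original source): a #BP raised by an INT3 that was
 * being delivered when the VM-exit occurred has the IDT-vectoring type
 * VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT, so
 *     vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT)
 * returns IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR per the switch above.
 */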
3033
3034
3035/**
3036 * Sets an event as a pending event to be injected into the guest.
3037 *
3038 * @param pVCpu The cross context virtual CPU structure.
3039 * @param u32IntInfo The VM-entry interruption-information field.
3040 * @param cbInstr The VM-entry instruction length in bytes (for
3041 * software interrupts, exceptions and privileged
3042 * software exceptions).
3043 * @param u32ErrCode The VM-entry exception error code.
3044 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3045 * page-fault.
3046 */
3047DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3048 RTGCUINTPTR GCPtrFaultAddress)
3049{
3050 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3051 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3052 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3053 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3054 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3055 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3056}
3057
3058
3059/**
3060 * Sets an external interrupt as pending-for-injection into the VM.
3061 *
3062 * @param pVCpu The cross context virtual CPU structure.
3063 * @param u8Interrupt The external interrupt vector.
3064 */
3065DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
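
/*
 * Illustrative example (not part of the original source): for u8Interrupt = 0x30 the code above
 * builds u32IntInfo = 0x80000030, i.e. bits 7:0 = 0x30 (vector), bits 10:8 = 0 (external
 * interrupt type), bit 11 = 0 (no error code) and bit 31 = 1 (valid).
 */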
3073
3074
3075/**
3076 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105/**
3106 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3107 *
3108 * @param pVCpu The cross context virtual CPU structure.
3109 */
3110DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3111{
3112 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3113 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3114 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3116 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3117}
3118
3119
3120/**
3121 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3122 *
3123 * @param pVCpu The cross context virtual CPU structure.
3124 */
3125DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3126{
3127 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3129 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3130 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3131 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3132}
3133
3134
3135#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3136/**
3137 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3138 *
3139 * @param pVCpu The cross context virtual CPU structure.
3140 * @param u32ErrCode The error code for the general-protection exception.
3141 */
3142DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3143{
3144 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3145 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3146 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3148 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3149}
3150
3151
3152/**
3153 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3154 *
3155 * @param pVCpu The cross context virtual CPU structure.
3156 * @param u32ErrCode The error code for the stack exception.
3157 */
3158DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3159{
3160 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3161 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3162 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3163 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3164 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3165}
3166#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3167
3168
3169/**
3170 * Fixes up attributes for the specified segment register.
3171 *
3172 * @param pVCpu The cross context virtual CPU structure.
3173 * @param pSelReg The segment register that needs fixing.
3174 * @param pszRegName The register name (for logging and assertions).
3175 */
3176static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3177{
3178 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3179
3180 /*
3181 * If VT-x marks the segment as unusable, most other bits remain undefined:
3182 * - For CS the L, D and G bits have meaning.
3183 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3184 * - For the remaining data segments no bits are defined.
3185 *
3186     * The present bit and the unusable bit have been observed to be set at the
3187 * same time (the selector was supposed to be invalid as we started executing
3188 * a V8086 interrupt in ring-0).
3189 *
3190     * What is important for the rest of the VBox code is that the P bit is
3191 * cleared. Some of the other VBox code recognizes the unusable bit, but
3192     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3193 * safe side here, we'll strip off P and other bits we don't care about. If
3194 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3195 *
3196 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3197 */
3198#ifdef VBOX_STRICT
3199 uint32_t const uAttr = pSelReg->Attr.u;
3200#endif
3201
3202 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3203 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3204 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3205
3206#ifdef VBOX_STRICT
3207# ifndef IN_NEM_DARWIN
3208 VMMRZCallRing3Disable(pVCpu);
3209# endif
3210 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3211# ifdef DEBUG_bird
3212 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3213 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3214 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3215# endif
3216# ifndef IN_NEM_DARWIN
3217 VMMRZCallRing3Enable(pVCpu);
3218# endif
3219 NOREF(uAttr);
3220#endif
3221 RT_NOREF2(pVCpu, pszRegName);
3222}
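
/*
 * Illustrative example (not part of the original source): an unusable segment that was
 * nevertheless reported present, say Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_P
 * | X86DESCATTR_DT | 3, comes out of the masking above as X86DESCATTR_UNUSABLE
 * | X86DESCATTR_DT | 3, i.e. with the P (and limit-high/AVL) bits stripped.
 */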
3223
3224
3225/**
3226 * Imports a guest segment register from the current VMCS into the guest-CPU
3227 * context.
3228 *
3229 * @param pVCpu The cross context virtual CPU structure.
3230 * @param iSegReg The segment register number (X86_SREG_XXX).
3231 *
3232 * @remarks Called with interrupts and/or preemption disabled.
3233 */
3234static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3235{
3236 Assert(iSegReg < X86_SREG_COUNT);
3237 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3238 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3239 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3240 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3241
3242 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3243
3244 uint16_t u16Sel;
3245 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3246 pSelReg->Sel = u16Sel;
3247 pSelReg->ValidSel = u16Sel;
3248
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3251
3252 uint32_t u32Attr;
3253 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3254 pSelReg->Attr.u = u32Attr;
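    /* Note: the register name passed below is looked up in the packed string
       "ES\0CS\0SS\0DS\0FS\0GS"; each name is two characters plus a NUL terminator,
       hence the iSegReg * 3 offset. */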
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3257
3258 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3259}
3260
3261
3262/**
3263 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3264 *
3265 * @param pVCpu The cross context virtual CPU structure.
3266 *
3267 * @remarks Called with interrupts and/or preemption disabled.
3268 */
3269static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3270{
3271 uint16_t u16Sel;
3272 uint64_t u64Base;
3273 uint32_t u32Limit, u32Attr;
3274 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3275 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3276 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3278
3279 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3280 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3281 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3282 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3283 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3284 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3285 if (u32Attr & X86DESCATTR_UNUSABLE)
3286 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3287}
3288
3289
3290/**
3291 * Imports the guest TR from the current VMCS into the guest-CPU context.
3292 *
3293 * @param pVCpu The cross context virtual CPU structure.
3294 *
3295 * @remarks Called with interrupts and/or preemption disabled.
3296 */
3297static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3298{
3299 uint16_t u16Sel;
3300 uint64_t u64Base;
3301 uint32_t u32Limit, u32Attr;
3302 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3303 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3305 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3306
3307 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3308 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3309 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3310 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3311 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3312 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3313 /* TR is the only selector that can never be unusable. */
3314 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 uint64_t u64Val;
3330 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3331 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3332 {
3333 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3334 AssertRC(rc);
3335
3336 pCtx->rip = u64Val;
3337 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3338 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3339 }
3340}
3341
3342
3343/**
3344 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3345 *
3346 * @param pVCpu The cross context virtual CPU structure.
3347 * @param pVmcsInfo The VMCS info. object.
3348 *
3349 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3350 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3351 * instead!!!
3352 */
3353static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3354{
3355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3356 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3357 {
3358 uint64_t u64Val;
3359 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3360 AssertRC(rc);
3361
3362 pCtx->rflags.u64 = u64Val;
3363#ifndef IN_NEM_DARWIN
3364 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3365 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3366 {
3367 pCtx->eflags.Bits.u1VM = 0;
3368 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3369 }
3370#else
3371 RT_NOREF(pVmcsInfo);
3372#endif
3373 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3374 }
3375}
3376
3377
3378/**
3379 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3380 * context.
3381 *
3382 * @param pVCpu The cross context virtual CPU structure.
3383 * @param pVmcsInfo The VMCS info. object.
3384 *
3385 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3386 * do not log!
3387 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3388 * instead!!!
3389 */
3390static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3391{
3392 uint32_t u32Val;
3393 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3394 if (!u32Val)
3395 {
3396 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3397 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3398 CPUMSetGuestNmiBlocking(pVCpu, false);
3399 }
3400 else
3401 {
3402 /*
3403 * We must import RIP here to set our EM interrupt-inhibited state.
3404 * We also import RFLAGS as our code that evaluates pending interrupts
3405 * before VM-entry requires it.
3406 */
3407 vmxHCImportGuestRip(pVCpu);
3408 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3409
3410 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3411 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3412 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3413 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3414
3415 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3416 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3417 }
3418}
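
/*
 * Illustrative note (not part of the original source): the interruptibility-state bits tested
 * above follow the Intel layout, i.e. bit 0 = blocking by STI, bit 1 = blocking by MOV SS,
 * bit 2 = blocking by SMI, bit 3 = blocking by NMI. So e.g. u32Val = 0x8 means only NMI
 * blocking is in effect: no interrupt-inhibit PC is recorded but NMI blocking is flagged.
 */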
3419
3420
3421/**
3422 * Worker for VMXR0ImportStateOnDemand.
3423 *
3424 * @returns VBox status code.
3425 * @param pVCpu The cross context virtual CPU structure.
3426 * @param pVmcsInfo The VMCS info. object.
3427 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3428 */
3429static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3430{
3431 int rc = VINF_SUCCESS;
3432 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3433 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3434 uint32_t u32Val;
3435
3436 /*
3437     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3438 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3439 * neither are other host platforms.
3440 *
3441     * Committing this temporarily as it prevents the BSOD.
3442 *
3443 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3444 */
3445# ifdef RT_OS_WINDOWS
3446 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3447 return VERR_HM_IPE_1;
3448# endif
3449
3450 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3451
3452#ifndef IN_NEM_DARWIN
3453 /*
3454 * We disable interrupts to make the updating of the state and in particular
3455 * the fExtrn modification atomic wrt to preemption hooks.
3456 */
3457 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3458#endif
3459
3460 fWhat &= pCtx->fExtrn;
3461 if (fWhat)
3462 {
3463 do
3464 {
3465 if (fWhat & CPUMCTX_EXTRN_RIP)
3466 vmxHCImportGuestRip(pVCpu);
3467
3468 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3469 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3470
3471 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3472 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3473
3474 if (fWhat & CPUMCTX_EXTRN_RSP)
3475 {
3476 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3477 AssertRC(rc);
3478 }
3479
3480 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3481 {
3482 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3483#ifndef IN_NEM_DARWIN
3484 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3485#else
3486 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3487#endif
3488 if (fWhat & CPUMCTX_EXTRN_CS)
3489 {
3490 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3491 vmxHCImportGuestRip(pVCpu);
3492 if (fRealOnV86Active)
3493 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3494 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3495 }
3496 if (fWhat & CPUMCTX_EXTRN_SS)
3497 {
3498 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3499 if (fRealOnV86Active)
3500 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3501 }
3502 if (fWhat & CPUMCTX_EXTRN_DS)
3503 {
3504 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3505 if (fRealOnV86Active)
3506 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_ES)
3509 {
3510 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3511 if (fRealOnV86Active)
3512 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_FS)
3515 {
3516 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3517 if (fRealOnV86Active)
3518 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_GS)
3521 {
3522 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3523 if (fRealOnV86Active)
3524 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3525 }
3526 }
3527
3528 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3529 {
3530 if (fWhat & CPUMCTX_EXTRN_LDTR)
3531 vmxHCImportGuestLdtr(pVCpu);
3532
3533 if (fWhat & CPUMCTX_EXTRN_GDTR)
3534 {
3535 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3536 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3537 pCtx->gdtr.cbGdt = u32Val;
3538 }
3539
3540 /* Guest IDTR. */
3541 if (fWhat & CPUMCTX_EXTRN_IDTR)
3542 {
3543 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3544 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3545 pCtx->idtr.cbIdt = u32Val;
3546 }
3547
3548 /* Guest TR. */
3549 if (fWhat & CPUMCTX_EXTRN_TR)
3550 {
3551#ifndef IN_NEM_DARWIN
3552 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3553                       so we don't need to import that one. */
3554 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3555#endif
3556 vmxHCImportGuestTr(pVCpu);
3557 }
3558 }
3559
3560 if (fWhat & CPUMCTX_EXTRN_DR7)
3561 {
3562#ifndef IN_NEM_DARWIN
3563 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3564#endif
3565 {
3566 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3567 AssertRC(rc);
3568 }
3569 }
3570
3571 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3572 {
3573 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3574 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3575 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3576 pCtx->SysEnter.cs = u32Val;
3577 }
3578
3579#ifndef IN_NEM_DARWIN
3580 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3581 {
3582 if ( pVM->hmr0.s.fAllow64BitGuests
3583 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3584 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3585 }
3586
3587 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3588 {
3589 if ( pVM->hmr0.s.fAllow64BitGuests
3590 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3591 {
3592 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3593 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3594 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3595 }
3596 }
3597
3598 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3599 {
3600 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3601 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3602 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3603 Assert(pMsrs);
3604 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3605 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3606 for (uint32_t i = 0; i < cMsrs; i++)
3607 {
3608 uint32_t const idMsr = pMsrs[i].u32Msr;
3609 switch (idMsr)
3610 {
3611 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3612 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3613 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3614 default:
3615 {
3616 uint32_t idxLbrMsr;
3617 if (VM_IS_VMX_LBR(pVM))
3618 {
3619 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3620 {
3621 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3622 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3623 break;
3624 }
3625 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3626 {
3627                                        Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3628 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3629 break;
3630 }
3631 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3632 {
3633 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3634 break;
3635 }
3636 /* Fallthru (no break) */
3637 }
3638 pCtx->fExtrn = 0;
3639 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3640 ASMSetFlags(fEFlags);
3641 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3642 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3643 }
3644 }
3645 }
3646 }
3647#endif
3648
3649 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3650 {
3651 if (fWhat & CPUMCTX_EXTRN_CR0)
3652 {
3653 uint64_t u64Cr0;
3654 uint64_t u64Shadow;
3655 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3656 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3657#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3658 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3659 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3660#else
3661 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3662 {
3663 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3664 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3665 }
3666 else
3667 {
3668 /*
3669 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3670 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3671 * re-construct CR0. See @bugref{9180#c95} for details.
3672 */
3673 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3674 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3677 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3678 }
3679#endif
3680#ifndef IN_NEM_DARWIN
3681 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3682#endif
3683 CPUMSetGuestCR0(pVCpu, u64Cr0);
3684#ifndef IN_NEM_DARWIN
3685 VMMRZCallRing3Enable(pVCpu);
3686#endif
3687 }
3688
3689 if (fWhat & CPUMCTX_EXTRN_CR4)
3690 {
3691 uint64_t u64Cr4;
3692 uint64_t u64Shadow;
3693 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3694 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3695#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3696 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3697 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3698#else
3699 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3700 {
3701 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3702 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3703 }
3704 else
3705 {
3706 /*
3707 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3708 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3709 * re-construct CR4. See @bugref{9180#c95} for details.
3710 */
3711 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3712 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3715 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3716 }
3717#endif
3718 pCtx->cr4 = u64Cr4;
3719 }
3720
3721 if (fWhat & CPUMCTX_EXTRN_CR3)
3722 {
3723 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3724 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3725 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3726 && CPUMIsGuestPagingEnabledEx(pCtx)))
3727 {
3728 uint64_t u64Cr3;
3729 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3730 if (pCtx->cr3 != u64Cr3)
3731 {
3732 pCtx->cr3 = u64Cr3;
3733 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3734 }
3735
3736 /*
3737                     * If the guest is in PAE mode, sync back the PDPEs into the guest state.
3738 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3739 */
3740 if (CPUMIsGuestInPAEModeEx(pCtx))
3741 {
3742 X86PDPE aPaePdpes[4];
3743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3744 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3746 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3747 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3748 {
3749 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3750 /* PGM now updates PAE PDPTEs while updating CR3. */
3751 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3752 }
3753 }
3754 }
3755 }
3756 }
3757
3758#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3759 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3760 {
3761 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3762 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3763 {
3764 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3765 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3766 if (RT_SUCCESS(rc))
3767 { /* likely */ }
3768 else
3769 break;
3770 }
3771 }
3772#endif
3773 } while (0);
3774
3775 if (RT_SUCCESS(rc))
3776 {
3777 /* Update fExtrn. */
3778 pCtx->fExtrn &= ~fWhat;
3779
3780 /* If everything has been imported, clear the HM keeper bit. */
3781 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3782 {
3783#ifndef IN_NEM_DARWIN
3784 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3785#else
3786 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3787#endif
3788 Assert(!pCtx->fExtrn);
3789 }
3790 }
3791 }
3792#ifndef IN_NEM_DARWIN
3793 else
3794 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3795
3796 /*
3797 * Restore interrupts.
3798 */
3799 ASMSetFlags(fEFlags);
3800#endif
3801
3802 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3803
3804 if (RT_SUCCESS(rc))
3805 { /* likely */ }
3806 else
3807 return rc;
3808
3809 /*
3810 * Honor any pending CR3 updates.
3811 *
3812 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3813 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3814 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3815 *
3816 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3817 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3818 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3819 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3820 *
3821 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3822 *
3823 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3824 */
3825 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3826#ifndef IN_NEM_DARWIN
3827 && VMMRZCallRing3IsEnabled(pVCpu)
3828#endif
3829 )
3830 {
3831 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3832 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3833 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3834 }
3835
3836 return VINF_SUCCESS;
3837}
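
/*
 * Illustrative usage sketch (not part of the original source): a VM-exit handler that only
 * needs the instruction pointer and flags would typically request just those bits, e.g.
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 * leaving the rest of the CPUMCTX_EXTRN_XXX state to be imported on demand later.
 */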
3838
3839
3840/**
3841 * Check per-VM and per-VCPU force flag actions that require us to go back to
3842 * ring-3 for one reason or another.
3843 *
3844 * @returns Strict VBox status code (i.e. informational status codes too)
3845 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3846 * ring-3.
3847 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3848 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3849 * interrupts)
3850 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3851 * all EMTs to be in ring-3.
3852 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3853 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3854 * to the EM loop.
3855 *
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param fIsNestedGuest  Flag whether this is for a pending nested-guest event.
3858 * @param fStepping Whether we are single-stepping the guest using the
3859 * hypervisor debugger.
3860 *
3861 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3862 * is no longer in VMX non-root mode.
3863 */
3864static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3865{
3866#ifndef IN_NEM_DARWIN
3867 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3868#endif
3869
3870 /*
3871 * Update pending interrupts into the APIC's IRR.
3872 */
3873 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3874 APICUpdatePendingInterrupts(pVCpu);
3875
3876 /*
3877 * Anything pending? Should be more likely than not if we're doing a good job.
3878 */
3879 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3880 if ( !fStepping
3881 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3882 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3883 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3884 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3885 return VINF_SUCCESS;
3886
3887    /* Pending PGM CR3 sync. */
3888 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3889 {
3890 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3891 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3892 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3893 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3894 if (rcStrict != VINF_SUCCESS)
3895 {
3896 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3897 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3898 return rcStrict;
3899 }
3900 }
3901
3902 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3903 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3904 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3905 {
3906 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3907 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3908 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
3909 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
3910 return rc;
3911 }
3912
3913 /* Pending VM request packets, such as hardware interrupts. */
3914 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3915 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3916 {
3917 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3918 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3919 return VINF_EM_PENDING_REQUEST;
3920 }
3921
3922 /* Pending PGM pool flushes. */
3923 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3924 {
3925 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3926 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3927 return VINF_PGM_POOL_FLUSH_PENDING;
3928 }
3929
3930 /* Pending DMA requests. */
3931 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3932 {
3933 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3934 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3935 return VINF_EM_RAW_TO_R3;
3936 }
3937
3938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3939 /*
3940 * Pending nested-guest events.
3941 *
3942     * Please note that the priority of these events is specified and important.
3943 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3944 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3945 */
3946 if (fIsNestedGuest)
3947 {
3948 /* Pending nested-guest APIC-write. */
3949 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3950 {
3951 Log4Func(("Pending nested-guest APIC-write\n"));
3952 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3953 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3954 return rcStrict;
3955 }
3956
3957 /* Pending nested-guest monitor-trap flag (MTF). */
3958 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3959 {
3960 Log4Func(("Pending nested-guest MTF\n"));
3961 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3962 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3963 return rcStrict;
3964 }
3965
3966 /* Pending nested-guest VMX-preemption timer expired. */
3967 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3968 {
3969 Log4Func(("Pending nested-guest preempt timer\n"));
3970 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3971 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3972 return rcStrict;
3973 }
3974 }
3975#else
3976 NOREF(fIsNestedGuest);
3977#endif
3978
3979 return VINF_SUCCESS;
3980}
3981
3982
3983/**
3984 * Converts any TRPM trap into a pending HM event. This is typically used when
3985 * entering from ring-3 (not longjmp returns).
3986 *
3987 * @param pVCpu The cross context virtual CPU structure.
3988 */
3989static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3990{
3991 Assert(TRPMHasTrap(pVCpu));
3992 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3993
3994 uint8_t uVector;
3995 TRPMEVENT enmTrpmEvent;
3996 uint32_t uErrCode;
3997 RTGCUINTPTR GCPtrFaultAddress;
3998 uint8_t cbInstr;
3999 bool fIcebp;
4000
4001 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4002 AssertRC(rc);
4003
4004 uint32_t u32IntInfo;
4005 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4006 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4007
4008 rc = TRPMResetTrap(pVCpu);
4009 AssertRC(rc);
4010 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4011 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4012
4013 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4014}
4015
4016
4017/**
4018 * Converts the pending HM event into a TRPM trap.
4019 *
4020 * @param pVCpu The cross context virtual CPU structure.
4021 */
4022static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4023{
4024 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4025
4026 /* If a trap was already pending, we did something wrong! */
4027 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4028
4029 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4030 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4031 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4032
4033 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4034
4035 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4036 AssertRC(rc);
4037
4038 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4039 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4040
4041 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4042 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4043 else
4044 {
4045 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4046 switch (uVectorType)
4047 {
4048 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4049 TRPMSetTrapDueToIcebp(pVCpu);
4050 RT_FALL_THRU();
4051 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4052 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4053 {
4054 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4055 || ( uVector == X86_XCPT_BP /* INT3 */
4056 || uVector == X86_XCPT_OF /* INTO */
4057 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4058 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4059 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4060 break;
4061 }
4062 }
4063 }
4064
4065 /* We're now done converting the pending event. */
4066 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4067}
4068
4069
4070/**
4071 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4072 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4073 *
4074 * @param pVCpu The cross context virtual CPU structure.
4075 * @param pVmcsInfo The VMCS info. object.
4076 */
4077static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4078{
4079 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4080 {
4081 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4082 {
4083 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4084 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4085 AssertRC(rc);
4086 }
4087    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4088}
4089
4090
4091/**
4092 * Clears the interrupt-window exiting control in the VMCS.
4093 *
4094 * @param pVCpu The cross context virtual CPU structure.
4095 * @param pVmcsInfo The VMCS info. object.
4096 */
4097DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4098{
4099 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4100 {
4101 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4102 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4103 AssertRC(rc);
4104 }
4105}
4106
4107
4108/**
4109 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4110 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4111 *
4112 * @param pVCpu The cross context virtual CPU structure.
4113 * @param pVmcsInfo The VMCS info. object.
4114 */
4115static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4116{
4117 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4118 {
4119 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4120 {
4121 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4122 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4123 AssertRC(rc);
4124 Log4Func(("Setup NMI-window exiting\n"));
4125 }
4126 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4127}
4128
4129
4130/**
4131 * Clears the NMI-window exiting control in the VMCS.
4132 *
4133 * @param pVCpu The cross context virtual CPU structure.
4134 * @param pVmcsInfo The VMCS info. object.
4135 */
4136DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4137{
4138 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4139 {
4140 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4141 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4142 AssertRC(rc);
4143 }
4144}
4145
4146
4147/**
4148 * Injects an event into the guest upon VM-entry by updating the relevant fields
4149 * in the VM-entry area in the VMCS.
4150 *
4151 * @returns Strict VBox status code (i.e. informational status codes too).
4152 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4153 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4154 *
4155 * @param pVCpu The cross context virtual CPU structure.
4156 * @param pVmcsInfo The VMCS info object.
4157 * @param fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4158 * @param pEvent The event being injected.
4159 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4160 *                          will be updated if necessary. This cannot be NULL.
4161 * @param fStepping Whether we're single-stepping guest execution and should
4162 * return VINF_EM_DBG_STEPPED if the event is injected
4163 * directly (registers modified by us, not by hardware on
4164 * VM-entry).
4165 */
4166static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4167 bool fStepping, uint32_t *pfIntrState)
4168{
4169 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4170 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4171 Assert(pfIntrState);
4172
4173#ifdef IN_NEM_DARWIN
4174 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4175#endif
4176
4177 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4178 uint32_t u32IntInfo = pEvent->u64IntInfo;
4179 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4180 uint32_t const cbInstr = pEvent->cbInstr;
4181 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4182 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4183 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4184
4185#ifdef VBOX_STRICT
4186 /*
4187 * Validate the error-code-valid bit for hardware exceptions.
4188 * No error codes for exceptions in real-mode.
4189 *
4190 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4191 */
4192 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4193 && !CPUMIsGuestInRealModeEx(pCtx))
4194 {
4195 switch (uVector)
4196 {
4197 case X86_XCPT_PF:
4198 case X86_XCPT_DF:
4199 case X86_XCPT_TS:
4200 case X86_XCPT_NP:
4201 case X86_XCPT_SS:
4202 case X86_XCPT_GP:
4203 case X86_XCPT_AC:
4204 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4205 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4206 RT_FALL_THRU();
4207 default:
4208 break;
4209 }
4210 }
4211
4212 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4213 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4214 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4215#endif
4216
4217 RT_NOREF(uVector);
4218 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4219 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4220 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4221 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4222 {
4223 Assert(uVector <= X86_XCPT_LAST);
4224 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4225 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4226 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4227 }
4228 else
4229 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4230
4231 /*
4232 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4233 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4234 * interrupt handler in the (real-mode) guest.
4235 *
4236 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4237 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4238 */
4239 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4240 {
4241#ifndef IN_NEM_DARWIN
4242 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4243#endif
4244 {
4245 /*
4246 * For CPUs with unrestricted guest execution enabled and with the guest
4247 * in real-mode, we must not set the deliver-error-code bit.
4248 *
4249 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4250 */
4251 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4252 }
4253#ifndef IN_NEM_DARWIN
4254 else
4255 {
4256 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4257 Assert(PDMVmmDevHeapIsEnabled(pVM));
4258 Assert(pVM->hm.s.vmx.pRealModeTSS);
4259 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4260
4261            /* We require RIP, RSP, RFLAGS, CS, IDTR; import them. */
4262 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4263 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4264 AssertRCReturn(rc2, rc2);
4265
4266 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4267 size_t const cbIdtEntry = sizeof(X86IDTR16);
4268 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4269 {
4270 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4271 if (uVector == X86_XCPT_DF)
4272 return VINF_EM_RESET;
4273
4274 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4275 No error codes for exceptions in real-mode. */
4276 if (uVector == X86_XCPT_GP)
4277 {
4278 static HMEVENT const s_EventXcptDf
4279 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4280 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4281 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4282 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4283 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4284 }
4285
4286 /*
4287 * If we're injecting an event with no valid IDT entry, inject a #GP.
4288 * No error codes for exceptions in real-mode.
4289 *
4290 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4291 */
4292 static HMEVENT const s_EventXcptGp
4293 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4294 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4295 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4296 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4297 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4298 }
4299
4300 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4301 uint16_t uGuestIp = pCtx->ip;
4302 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4303 {
4304 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4305                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4306 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4307 }
4308 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4309 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4310
4311 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4312 X86IDTR16 IdtEntry;
4313 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4314 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4315 AssertRCReturn(rc2, rc2);
4316
4317 /* Construct the stack frame for the interrupt/exception handler. */
4318 VBOXSTRICTRC rcStrict;
4319 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4320 if (rcStrict == VINF_SUCCESS)
4321 {
4322 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4323 if (rcStrict == VINF_SUCCESS)
4324 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4325 }
4326
4327 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4328 if (rcStrict == VINF_SUCCESS)
4329 {
4330 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4331 pCtx->rip = IdtEntry.offSel;
4332 pCtx->cs.Sel = IdtEntry.uSel;
4333 pCtx->cs.ValidSel = IdtEntry.uSel;
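                /* Note: real-mode code segment base = selector * 16; cbIdtEntry (sizeof(X86IDTR16),
                   which is 4) conveniently doubles as the shift count here. */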
4334 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4335 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4336 && uVector == X86_XCPT_PF)
4337 pCtx->cr2 = GCPtrFault;
4338
4339 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4340 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4341 | HM_CHANGED_GUEST_RSP);
4342
4343 /*
4344 * If we delivered a hardware exception (other than an NMI) and if there was
4345 * block-by-STI in effect, we should clear it.
4346 */
4347 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4348 {
4349 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4350 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4351 Log4Func(("Clearing inhibition due to STI\n"));
4352 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4353 }
4354
4355 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4356 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4357
4358 /*
4359 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4360 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4361 */
4362 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4363
4364 /*
4365 * If we eventually support nested-guest execution without unrestricted guest execution,
4366 * we should set fInterceptEvents here.
4367 */
4368 Assert(!fIsNestedGuest);
4369
4370 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4371 if (fStepping)
4372 rcStrict = VINF_EM_DBG_STEPPED;
4373 }
4374 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4375 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4376 return rcStrict;
4377 }
4378#else
4379 RT_NOREF(pVmcsInfo);
4380#endif
4381 }
4382
4383 /*
4384 * Validate.
4385 */
4386 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4387 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4388
4389 /*
4390 * Inject the event into the VMCS.
4391 */
4392 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4393 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4394 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4395 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4396 AssertRC(rc);
4397
4398 /*
4399 * Update guest CR2 if this is a page-fault.
4400 */
4401 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4402 pCtx->cr2 = GCPtrFault;
4403
4404 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4405 return VINF_SUCCESS;
4406}
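
/*
 * Illustrative usage sketch (not part of the original source; pEvent stands for any HMEVENT
 * built as in the real-mode path above and fIntrState for the guest-interruptibility state
 * previously read from the VMCS):
 *
 *     VBOXSTRICTRC rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, false /`*fIsNestedGuest*`/,
 *                                                  pEvent, false /`*fStepping*`/, &fIntrState);
 */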
4407
4408
4409/**
4410 * Evaluates the event to be delivered to the guest and sets it as the pending
4411 * event.
4412 *
4413 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4414 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4415 * NOT restore these force-flags.
4416 *
4417 * @returns Strict VBox status code (i.e. informational status codes too).
4418 * @param pVCpu The cross context virtual CPU structure.
4419 * @param pVmcsInfo The VMCS information structure.
4420 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4421 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4422 */
4423static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4424{
4425 Assert(pfIntrState);
4426 Assert(!TRPMHasTrap(pVCpu));
4427
4428 /*
4429 * Compute/update guest-interruptibility state related FFs.
4430 * The FFs will be used below while evaluating events to be injected.
4431 */
4432 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4433
4434 /*
4435 * Evaluate if a new event needs to be injected.
4436     * An event that's already pending has already been subjected to all the necessary checks.
4437 */
4438 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4439 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4440 {
4441 /** @todo SMI. SMIs take priority over NMIs. */
4442
4443 /*
4444 * NMIs.
4445 * NMIs take priority over external interrupts.
4446 */
4447#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4448 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4449#endif
4450 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4451 {
4452 /*
4453 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4454 *
4455 * For a nested-guest, the FF always indicates the outer guest's ability to
4456 * receive an NMI while the guest-interruptibility state bit depends on whether
4457 * the nested-hypervisor is using virtual-NMIs.
4458 */
4459 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4460 {
4461#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4462 if ( fIsNestedGuest
4463 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4464 return IEMExecVmxVmexitXcptNmi(pVCpu);
4465#endif
4466 vmxHCSetPendingXcptNmi(pVCpu);
4467 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4468 Log4Func(("NMI pending injection\n"));
4469
4470 /* We've injected the NMI, bail. */
4471 return VINF_SUCCESS;
4472 }
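            /* NMIs are currently blocked (typically because a previous NMI is still being serviced and
               blocking lasts until the next IRET); request an NMI-window VM-exit so we get notified as
               soon as the guest can accept the NMI again. */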
4473 else if (!fIsNestedGuest)
4474 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4475 }
4476
4477 /*
4478 * External interrupts (PIC/APIC).
4479 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4480 * We cannot re-request the interrupt from the controller again.
4481 */
4482 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4483 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4484 {
4485 Assert(!DBGFIsStepping(pVCpu));
4486 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4487 AssertRC(rc);
4488
4489 /*
4490             * We must not check EFLAGS directly when executing a nested-guest; use
4491             * CPUMIsGuestPhysIntrEnabled() instead, as EFLAGS.IF does not control the blocking of
4492 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4493 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4494 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4495 *
4496 * See Intel spec. 25.4.1 "Event Blocking".
4497 */
4498 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4499 {
4500#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4501 if ( fIsNestedGuest
4502 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4503 {
4504 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4505 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4506 return rcStrict;
4507 }
4508#endif
4509 uint8_t u8Interrupt;
4510 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4511 if (RT_SUCCESS(rc))
4512 {
4513#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4514 if ( fIsNestedGuest
4515 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4516 {
4517 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4518 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4519 return rcStrict;
4520 }
4521#endif
4522 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4523 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4524 }
4525 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4526 {
4527 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4528
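                    /* Use the masked interrupt's priority class (vector bits 7:4) as the TPR threshold so
                       that lowering the guest TPR below it triggers a TPR-below-threshold VM-exit, at which
                       point the interrupt can be delivered. */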
4529 if ( !fIsNestedGuest
4530 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4531 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4532 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4533
4534 /*
4535 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4536 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4537 * need to re-set this force-flag here.
4538 */
4539 }
4540 else
4541 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4542
4543 /* We've injected the interrupt or taken necessary action, bail. */
4544 return VINF_SUCCESS;
4545 }
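            /* The guest cannot accept the interrupt right now (EFLAGS.IF clear or an interrupt shadow is in
               effect); request an interrupt-window VM-exit so we get notified when it becomes ready. */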
4546 if (!fIsNestedGuest)
4547 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4548 }
4549 }
4550 else if (!fIsNestedGuest)
4551 {
4552 /*
4553 * An event is being injected or we are in an interrupt shadow. Check if another event is
4554 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4555 * the pending event.
4556 */
4557 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4558 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4559 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4560 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4561 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4562 }
4563 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4564
4565 return VINF_SUCCESS;
4566}
4567
4568
4569/**
4570 * Injects any pending events into the guest if the guest is in a state to
4571 * receive them.
4572 *
4573 * @returns Strict VBox status code (i.e. informational status codes too).
4574 * @param pVCpu The cross context virtual CPU structure.
4575 * @param pVmcsInfo The VMCS information structure.
4576 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4577 * @param fIntrState The VT-x guest-interruptibility state.
4578 * @param fStepping Whether we are single-stepping the guest using the
4579 * hypervisor debugger and should return
4580 * VINF_EM_DBG_STEPPED if the event was dispatched
4581 * directly.
4582 */
4583static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4584 uint32_t fIntrState, bool fStepping)
4585{
4586 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4587#ifndef IN_NEM_DARWIN
4588 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4589#endif
4590
4591#ifdef VBOX_STRICT
4592 /*
4593 * Verify guest-interruptibility state.
4594 *
4595 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4596 * since injecting an event may modify the interruptibility state and we must thus always
4597 * use fIntrState.
4598 */
4599 {
4600 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4601 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4602 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4603 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4604 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4605 Assert(!TRPMHasTrap(pVCpu));
4606 NOREF(fBlockMovSS); NOREF(fBlockSti);
4607 }
4608#endif
4609
4610 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4611 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4612 {
4613 /*
4614 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4615 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4616 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4617 *
4618 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4619 */
4620 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4621#ifdef VBOX_STRICT
4622 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4623 {
4624 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4625 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4626 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4627 }
4628 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4629 {
4630 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4631 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4632 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4633 }
4634#endif
4635 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4636 uIntType));
4637
4638 /*
4639 * Inject the event and get any changes to the guest-interruptibility state.
4640 *
4641 * The guest-interruptibility state may need to be updated if we inject the event
4642 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4643 */
4644 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4645 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4646
4647 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4648 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4649 else
4650 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4651 }
4652
4653 /*
4654 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4655     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4656 */
4657 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4658 && !fIsNestedGuest)
4659 {
4660 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4661
4662 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4663 {
4664 /*
4665 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4666 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4667 */
4668 Assert(!DBGFIsStepping(pVCpu));
4669 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4670 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4671 AssertRC(rc);
4672 }
4673 else
4674 {
4675 /*
4676 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4677 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4678             * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
4679             * we use MTF, so just make sure it's called before executing guest code.
4680 */
4681 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4682 }
4683 }
4684    /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4685
4686 /*
4687 * Finally, update the guest-interruptibility state.
4688 *
4689 * This is required for the real-on-v86 software interrupt injection, for
4690 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4691 */
4692 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4693 AssertRC(rc);
4694
4695 /*
4696 * There's no need to clear the VM-entry interruption-information field here if we're not
4697 * injecting anything. VT-x clears the valid bit on every VM-exit.
4698 *
4699 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4700 */
4701
4702 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4703 return rcStrict;
4704}
4705
4706
4707/**
4708 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4709 * and update error record fields accordingly.
4710 *
4711 * @returns VMX_IGS_* error codes.
4712 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4713 * wrong with the guest state.
4714 *
4715 * @param pVCpu The cross context virtual CPU structure.
4716 * @param pVmcsInfo The VMCS info. object.
4717 *
4718 * @remarks This function assumes our cache of the VMCS controls
4719 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4720 */
4721static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4722{
4723#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4724#define HMVMX_CHECK_BREAK(expr, err) do { \
4725 if (!(expr)) { uError = (err); break; } \
4726 } while (0)
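/* The guest-state checks below run inside a single do { ... } while (0) block; the first failing check
   records its VMX_IGS_* diagnostic in uError and breaks out of the loop. */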
4727
4728 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4729 uint32_t uError = VMX_IGS_ERROR;
4730 uint32_t u32IntrState = 0;
4731#ifndef IN_NEM_DARWIN
4732 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4733 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4734#else
4735 bool const fUnrestrictedGuest = true;
4736#endif
4737 do
4738 {
4739 int rc;
4740
4741 /*
4742 * Guest-interruptibility state.
4743 *
4744         * Read this first so that any check failing prior to those that actually
4745         * require the guest-interruptibility state still reflects the correct
4746         * VMCS value, avoiding further confusion.
4747 */
4748 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4749 AssertRC(rc);
4750
4751 uint32_t u32Val;
4752 uint64_t u64Val;
4753
4754 /*
4755 * CR0.
4756 */
4757 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4758 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4759 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
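        /* CR0 bits that are set in both fixed-0 and fixed-1 MSRs (fSetCr0) must be 1 in the guest, while
           bits set in neither (i.e. outside fZapCr0) must be 0; see Intel spec. Appendix A.7 "VMX-Fixed
           Bits in CR0". */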
4760 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4761 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4762 if (fUnrestrictedGuest)
4763 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4764
4765 uint64_t u64GuestCr0;
4766 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4767 AssertRC(rc);
4768 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4769 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4770 if ( !fUnrestrictedGuest
4771 && (u64GuestCr0 & X86_CR0_PG)
4772 && !(u64GuestCr0 & X86_CR0_PE))
4773 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4774
4775 /*
4776 * CR4.
4777 */
4778 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4779 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4780 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4781
4782 uint64_t u64GuestCr4;
4783 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4784 AssertRC(rc);
4785 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4786 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4787
4788 /*
4789 * IA32_DEBUGCTL MSR.
4790 */
4791 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4792 AssertRC(rc);
4793 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4794 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4795 {
4796 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4797 }
4798 uint64_t u64DebugCtlMsr = u64Val;
4799
4800#ifdef VBOX_STRICT
4801 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4802 AssertRC(rc);
4803 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4804#endif
4805 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4806
4807 /*
4808 * RIP and RFLAGS.
4809 */
4810 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4811 AssertRC(rc);
4812        /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
4813 if ( !fLongModeGuest
4814 || !pCtx->cs.Attr.n.u1Long)
4815 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4816 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4817 * must be identical if the "IA-32e mode guest" VM-entry
4818 * control is 1 and CS.L is 1. No check applies if the
4819 * CPU supports 64 linear-address bits. */
4820
4821 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4822 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4823 AssertRC(rc);
4824        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5 and 3 MBZ. */
4825 VMX_IGS_RFLAGS_RESERVED);
4826 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4827 uint32_t const u32Eflags = u64Val;
4828
4829 if ( fLongModeGuest
4830 || ( fUnrestrictedGuest
4831 && !(u64GuestCr0 & X86_CR0_PE)))
4832 {
4833 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4834 }
4835
4836 uint32_t u32EntryInfo;
4837 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4838 AssertRC(rc);
4839 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4840 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4841
4842 /*
4843 * 64-bit checks.
4844 */
4845 if (fLongModeGuest)
4846 {
4847 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4848 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4849 }
4850
4851 if ( !fLongModeGuest
4852 && (u64GuestCr4 & X86_CR4_PCIDE))
4853 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4854
4855 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4856 * 51:32 beyond the processor's physical-address width are 0. */
4857
4858 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4859 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4860 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4861
4862#ifndef IN_NEM_DARWIN
4863 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4864 AssertRC(rc);
4865 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4866
4867 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4868 AssertRC(rc);
4869 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4870#endif
4871
4872 /*
4873 * PERF_GLOBAL MSR.
4874 */
4875 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4876 {
4877 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4878 AssertRC(rc);
4879 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4880 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4881 }
4882
4883 /*
4884 * PAT MSR.
4885 */
4886 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4887 {
4888 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4889 AssertRC(rc);
4890            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
4891 for (unsigned i = 0; i < 8; i++)
4892 {
4893 uint8_t u8Val = (u64Val & 0xff);
4894 if ( u8Val != 0 /* UC */
4895 && u8Val != 1 /* WC */
4896 && u8Val != 4 /* WT */
4897 && u8Val != 5 /* WP */
4898 && u8Val != 6 /* WB */
4899 && u8Val != 7 /* UC- */)
4900 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4901 u64Val >>= 8;
4902 }
4903 }
4904
4905 /*
4906 * EFER MSR.
4907 */
4908 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4909 {
4910 Assert(g_fHmVmxSupportsVmcsEfer);
4911 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4912 AssertRC(rc);
4913 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4914 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4915 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4916 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4917 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4918 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4919 * iemVmxVmentryCheckGuestState(). */
4920 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4921 || !(u64GuestCr0 & X86_CR0_PG)
4922 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4923 VMX_IGS_EFER_LMA_LME_MISMATCH);
4924 }
4925
4926 /*
4927 * Segment registers.
4928 */
4929 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4930 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4931 if (!(u32Eflags & X86_EFL_VM))
4932 {
4933 /* CS */
4934 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4935 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4936 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4937 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4938 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4939 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4940 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4941 /* CS cannot be loaded with NULL in protected mode. */
4942 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4943 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4944 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4945 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4946 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4947 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4948 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4949 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4950 else
4951 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4952
4953 /* SS */
4954 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4955 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4956 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4957 if ( !(pCtx->cr0 & X86_CR0_PE)
4958 || pCtx->cs.Attr.n.u4Type == 3)
4959 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4960
4961 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4962 {
4963 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4964 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4965 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4966 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4967 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4968 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4969 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4970 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4971 }
4972
4973 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4974 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4975 {
4976 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4977 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4978 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4979 || pCtx->ds.Attr.n.u4Type > 11
4980 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4981 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4982 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4983 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4984 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4985 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4986 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4987 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4988 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4989 }
4990 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4991 {
4992 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4993 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4994 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4995 || pCtx->es.Attr.n.u4Type > 11
4996 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4997 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4998 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4999 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5000 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5001 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5002 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5003 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5004 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5005 }
5006 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5007 {
5008 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5009 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5010 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5011 || pCtx->fs.Attr.n.u4Type > 11
5012 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5013 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5014 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5015 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5016 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5017 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5018 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5019 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5020 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5021 }
5022 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5023 {
5024 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5025 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5026 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5027 || pCtx->gs.Attr.n.u4Type > 11
5028 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5029 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5030 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5031 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5032 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5033 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5034 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5035 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5036 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5037 }
5038 /* 64-bit capable CPUs. */
5039 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5040 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5041 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5042 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5043 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5044 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5045 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5046 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5047 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5048 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5049 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5050 }
5051 else
5052 {
5053 /* V86 mode checks. */
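            /* When real-on-v86 mode is active, the VMCS carries the fake segment attributes (0xf3) that we
               exported rather than the guest's own real-mode values, so validate against those. */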
5054 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5055 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5056 {
5057 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5058 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5059 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5060 }
5061 else
5062 {
5063 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5064 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5065 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5066 }
5067
5068 /* CS */
5069 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5070 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5071 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5072 /* SS */
5073 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5074 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5075 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5076 /* DS */
5077 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5078 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5079 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5080 /* ES */
5081 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5082 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5083 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5084 /* FS */
5085 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5086 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5087 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5088 /* GS */
5089 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5090 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5091 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5092 /* 64-bit capable CPUs. */
5093 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5094 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5095 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5096 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5097 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5098 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5099 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5100 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5101 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5102 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5103 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5104 }
5105
5106 /*
5107 * TR.
5108 */
5109 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5110 /* 64-bit capable CPUs. */
5111 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5112 if (fLongModeGuest)
5113 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5114 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5115 else
5116 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5117 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5118 VMX_IGS_TR_ATTR_TYPE_INVALID);
5119 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5120 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5121 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5122 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5123 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5124 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5125 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5126 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5127
5128 /*
5129 * GDTR and IDTR (64-bit capable checks).
5130 */
5131 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5132 AssertRC(rc);
5133 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5134
5135 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5136 AssertRC(rc);
5137 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5138
5139 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5140 AssertRC(rc);
5141 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5142
5143 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5144 AssertRC(rc);
5145 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5146
5147 /*
5148 * Guest Non-Register State.
5149 */
5150 /* Activity State. */
5151 uint32_t u32ActivityState;
5152 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5153 AssertRC(rc);
5154 HMVMX_CHECK_BREAK( !u32ActivityState
5155 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5156 VMX_IGS_ACTIVITY_STATE_INVALID);
5157 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5158 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5159
5160 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5161 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5162 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5163
5164 /** @todo Activity state and injecting interrupts. Left as a todo since we
5165         *        currently don't use any activity state other than ACTIVE. */
5166
5167 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5168 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5169
5170 /* Guest interruptibility-state. */
5171 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5172 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5173 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5174 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5175 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5176 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5177 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5178 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5179 {
5180 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5181 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5182 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5183 }
5184 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5185 {
5186 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5187 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5188 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5189 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5190 }
5191 /** @todo Assumes the processor is not in SMM. */
5192 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5193 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5194 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5195 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5196 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5197 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5198 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5199 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5200
5201 /* Pending debug exceptions. */
5202 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5203 AssertRC(rc);
5204 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5205 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5206 u32Val = u64Val; /* For pending debug exceptions checks below. */
5207
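        /* While in an STI/MOV-SS interrupt shadow or in the HLT activity state, single-stepping (EFLAGS.TF
           set with IA32_DEBUGCTL.BTF clear) requires the BS bit to be set in the pending debug exceptions
           field, and it must be clear otherwise. */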
5208 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5209 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5210 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5211 {
5212 if ( (u32Eflags & X86_EFL_TF)
5213 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5214 {
5215 /* Bit 14 is PendingDebug.BS. */
5216 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5217 }
5218 if ( !(u32Eflags & X86_EFL_TF)
5219 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5220 {
5221 /* Bit 14 is PendingDebug.BS. */
5222 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5223 }
5224 }
5225
5226#ifndef IN_NEM_DARWIN
5227 /* VMCS link pointer. */
5228 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5229 AssertRC(rc);
5230 if (u64Val != UINT64_C(0xffffffffffffffff))
5231 {
5232 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5233 /** @todo Bits beyond the processor's physical-address width MBZ. */
5234 /** @todo SMM checks. */
5235 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5236 Assert(pVmcsInfo->pvShadowVmcs);
5237 VMXVMCSREVID VmcsRevId;
5238 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5239 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5240 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5241 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5242 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5243 }
5244
5245 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5246 * not using nested paging? */
5247 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5248 && !fLongModeGuest
5249 && CPUMIsGuestInPAEModeEx(pCtx))
5250 {
5251 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5252 AssertRC(rc);
5253 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5254
5255 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5256 AssertRC(rc);
5257 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5258
5259 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5260 AssertRC(rc);
5261 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5262
5263 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5264 AssertRC(rc);
5265 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5266 }
5267#endif
5268
5269 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5270 if (uError == VMX_IGS_ERROR)
5271 uError = VMX_IGS_REASON_NOT_FOUND;
5272 } while (0);
5273
5274 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5275 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5276 return uError;
5277
5278#undef HMVMX_ERROR_BREAK
5279#undef HMVMX_CHECK_BREAK
5280}
5281
5282
5283#ifndef HMVMX_USE_FUNCTION_TABLE
5284/**
5285 * Handles a guest VM-exit from hardware-assisted VMX execution.
5286 *
5287 * @returns Strict VBox status code (i.e. informational status codes too).
5288 * @param pVCpu The cross context virtual CPU structure.
5289 * @param pVmxTransient The VMX-transient structure.
5290 */
5291DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5292{
5293#ifdef DEBUG_ramshankar
5294# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5295 do { \
5296 if (a_fSave != 0) \
5297 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5298 VBOXSTRICTRC rcStrict = a_CallExpr; \
5299 if (a_fSave != 0) \
5300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5301 return rcStrict; \
5302 } while (0)
5303#else
5304# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5305#endif
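    /* VMEXIT_CALL_RET tail-calls the individual VM-exit handler; the DEBUG_ramshankar variant additionally
       imports the entire guest state before the handler and marks all of it as changed afterwards when
       a_fSave is non-zero. */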
5306 uint32_t const uExitReason = pVmxTransient->uExitReason;
5307 switch (uExitReason)
5308 {
5309 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5310 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5311 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5312 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5313 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5314 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5315 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5316 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5317 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5318 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5319 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5320 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5321 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5322 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5323 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5324 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5325 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5326 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5327 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5328 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5329 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5330 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5331 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5332 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5333 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5334 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5335 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5336 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5337 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5338 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5339#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5340 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5341 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5342 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5343 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5344 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5345        case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5346        case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5347 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5348 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5349 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5350#else
5351 case VMX_EXIT_VMCLEAR:
5352 case VMX_EXIT_VMLAUNCH:
5353 case VMX_EXIT_VMPTRLD:
5354 case VMX_EXIT_VMPTRST:
5355 case VMX_EXIT_VMREAD:
5356 case VMX_EXIT_VMRESUME:
5357 case VMX_EXIT_VMWRITE:
5358 case VMX_EXIT_VMXOFF:
5359 case VMX_EXIT_VMXON:
5360 case VMX_EXIT_INVVPID:
5361 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5362#endif
5363#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5364 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5365#else
5366 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5367#endif
5368
5369 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5370 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5371 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5372
5373 case VMX_EXIT_INIT_SIGNAL:
5374 case VMX_EXIT_SIPI:
5375 case VMX_EXIT_IO_SMI:
5376 case VMX_EXIT_SMI:
5377 case VMX_EXIT_ERR_MSR_LOAD:
5378 case VMX_EXIT_ERR_MACHINE_CHECK:
5379 case VMX_EXIT_PML_FULL:
5380 case VMX_EXIT_VIRTUALIZED_EOI:
5381 case VMX_EXIT_GDTR_IDTR_ACCESS:
5382 case VMX_EXIT_LDTR_TR_ACCESS:
5383 case VMX_EXIT_APIC_WRITE:
5384 case VMX_EXIT_RDRAND:
5385 case VMX_EXIT_RSM:
5386 case VMX_EXIT_VMFUNC:
5387 case VMX_EXIT_ENCLS:
5388 case VMX_EXIT_RDSEED:
5389 case VMX_EXIT_XSAVES:
5390 case VMX_EXIT_XRSTORS:
5391 case VMX_EXIT_UMWAIT:
5392 case VMX_EXIT_TPAUSE:
5393 case VMX_EXIT_LOADIWKEY:
5394 default:
5395 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5396 }
5397#undef VMEXIT_CALL_RET
5398}
5399#endif /* !HMVMX_USE_FUNCTION_TABLE */
5400
5401
5402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5403/**
5404 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5405 *
5406 * @returns Strict VBox status code (i.e. informational status codes too).
5407 * @param pVCpu The cross context virtual CPU structure.
5408 * @param pVmxTransient The VMX-transient structure.
5409 */
5410DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5411{
5412 uint32_t const uExitReason = pVmxTransient->uExitReason;
5413 switch (uExitReason)
5414 {
5415# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5416 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5417 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5418# else
5419 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5420 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5421# endif
5422 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5423 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5424 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5425
5426 /*
5427 * We shouldn't direct host physical interrupts to the nested-guest.
5428 */
5429 case VMX_EXIT_EXT_INT:
5430 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5431
5432 /*
5433 * Instructions that cause VM-exits unconditionally or the condition is
5434 * always taken solely from the nested hypervisor (meaning if the VM-exit
5435 * happens, it's guaranteed to be a nested-guest VM-exit).
5436 *
5437 * - Provides VM-exit instruction length ONLY.
5438 */
5439 case VMX_EXIT_CPUID: /* Unconditional. */
5440 case VMX_EXIT_VMCALL:
5441 case VMX_EXIT_GETSEC:
5442 case VMX_EXIT_INVD:
5443 case VMX_EXIT_XSETBV:
5444 case VMX_EXIT_VMLAUNCH:
5445 case VMX_EXIT_VMRESUME:
5446 case VMX_EXIT_VMXOFF:
5447 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5448 case VMX_EXIT_VMFUNC:
5449 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5450
5451 /*
5452 * Instructions that cause VM-exits unconditionally or the condition is
5453 * always taken solely from the nested hypervisor (meaning if the VM-exit
5454 * happens, it's guaranteed to be a nested-guest VM-exit).
5455 *
5456 * - Provides VM-exit instruction length.
5457 * - Provides VM-exit information.
5458 * - Optionally provides Exit qualification.
5459 *
5460 * Since Exit qualification is 0 for all VM-exits where it is not
5461 * applicable, reading and passing it to the guest should produce
5462 * defined behavior.
5463 *
5464 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5465 */
5466 case VMX_EXIT_INVEPT: /* Unconditional. */
5467 case VMX_EXIT_INVVPID:
5468 case VMX_EXIT_VMCLEAR:
5469 case VMX_EXIT_VMPTRLD:
5470 case VMX_EXIT_VMPTRST:
5471 case VMX_EXIT_VMXON:
5472 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5473 case VMX_EXIT_LDTR_TR_ACCESS:
5474 case VMX_EXIT_RDRAND:
5475 case VMX_EXIT_RDSEED:
5476 case VMX_EXIT_XSAVES:
5477 case VMX_EXIT_XRSTORS:
5478 case VMX_EXIT_UMWAIT:
5479 case VMX_EXIT_TPAUSE:
5480 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5481
5482 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5485 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5492 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5494 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5495 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5496 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5497 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5498 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5499 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5500 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5501
5502 case VMX_EXIT_PREEMPT_TIMER:
5503 {
5504 /** @todo NSTVMX: Preempt timer. */
5505 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5506 }
5507
5508 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5509 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5510
5511 case VMX_EXIT_VMREAD:
5512 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5513
5514 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5515 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5516
5517 case VMX_EXIT_INIT_SIGNAL:
5518 case VMX_EXIT_SIPI:
5519 case VMX_EXIT_IO_SMI:
5520 case VMX_EXIT_SMI:
5521 case VMX_EXIT_ERR_MSR_LOAD:
5522 case VMX_EXIT_ERR_MACHINE_CHECK:
5523 case VMX_EXIT_PML_FULL:
5524 case VMX_EXIT_RSM:
5525 default:
5526 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5527 }
5528}
5529#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5530
5531
5532/** @name VM-exit helpers.
5533 * @{
5534 */
5535/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5536/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5537/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5538
5539/** Macro for VM-exits called unexpectedly. */
5540#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5541 do { \
5542 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5543 return VERR_VMX_UNEXPECTED_EXIT; \
5544 } while (0)
5545
5546#ifdef VBOX_STRICT
5547# ifndef IN_NEM_DARWIN
5548/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5549# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5550 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5551
5552# define HMVMX_ASSERT_PREEMPT_CPUID() \
5553 do { \
5554 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5555 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5556 } while (0)
5557
5558# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5559 do { \
5560 AssertPtr((a_pVCpu)); \
5561 AssertPtr((a_pVmxTransient)); \
5562 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5563 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5564 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5565 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5566 Assert((a_pVmxTransient)->pVmcsInfo); \
5567 Assert(ASMIntAreEnabled()); \
5568 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5569 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5570 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5571 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5572 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5573 HMVMX_ASSERT_PREEMPT_CPUID(); \
5574 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5575 } while (0)
5576# else
5577# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5578# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5579# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5580 do { \
5581 AssertPtr((a_pVCpu)); \
5582 AssertPtr((a_pVmxTransient)); \
5583 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5584 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5585 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5586 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5587 Assert((a_pVmxTransient)->pVmcsInfo); \
5588 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5589 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5590 } while (0)
5591# endif
5592
5593# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5594 do { \
5595 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5596 Assert((a_pVmxTransient)->fIsNestedGuest); \
5597 } while (0)
5598
5599# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5600 do { \
5601 Log4Func(("\n")); \
5602 } while (0)
5603#else
5604# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5605 do { \
5606 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5607 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5608 } while (0)
5609
5610# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5611 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5612
5613# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5614#endif
5615
5616#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5617/** Macro that does the necessary privilege checks and intercepted VM-exits for
5618 * guests that attempted to execute a VMX instruction. */
5619# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5620 do \
5621 { \
5622 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5623 if (rcStrictTmp == VINF_SUCCESS) \
5624 { /* likely */ } \
5625 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5626 { \
5627 Assert((a_pVCpu)->hm.s.Event.fPending); \
5628 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5629 return VINF_SUCCESS; \
5630 } \
5631 else \
5632 { \
5633 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5634 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5635 } \
5636 } while (0)
5637
5638/** Macro that decodes a memory operand for an VM-exit caused by an instruction. */
5639# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5640 do \
5641 { \
5642 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5643 (a_pGCPtrEffAddr)); \
5644 if (rcStrictTmp == VINF_SUCCESS) \
5645 { /* likely */ } \
5646 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5647 { \
5648 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5649 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5650 NOREF(uXcptTmp); \
5651 return VINF_SUCCESS; \
5652 } \
5653 else \
5654 { \
5655 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5656 return rcStrictTmp; \
5657 } \
5658 } while (0)
5659#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5660
5661
5662/**
5663 * Advances the guest RIP by the specified number of bytes.
5664 *
5665 * @param pVCpu The cross context virtual CPU structure.
5666 * @param cbInstr Number of bytes to advance the RIP by.
5667 *
5668 * @remarks No-long-jump zone!!!
5669 */
5670DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5671{
5672 /* Advance the RIP. */
5673 pVCpu->cpum.GstCtx.rip += cbInstr;
5674 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5675
5676 /* Update interrupt inhibition. */
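    /* The STI/MOV SS inhibition only covers the instruction immediately following it; once RIP no longer
       matches the recorded inhibition PC, the force-flag can be cleared. */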
5677 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5678 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5679 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5680}
5681
5682
5683/**
5684 * Advances the guest RIP after reading it from the VMCS.
5685 *
5686 * @returns VBox status code, no informational status codes.
5687 * @param pVCpu The cross context virtual CPU structure.
5688 * @param pVmxTransient The VMX-transient structure.
5689 *
5690 * @remarks No-long-jump zone!!!
5691 */
5692static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5693{
5694 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5695 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5696 AssertRCReturn(rc, rc);
5697
5698 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5699 return VINF_SUCCESS;
5700}
5701
5702
5703/**
5704 * Handle a condition that occurred while delivering an event through the guest or
5705 * nested-guest IDT.
5706 *
5707 * @returns Strict VBox status code (i.e. informational status codes too).
5708 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5709 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5710 * to continue execution of the guest which will deliver the \#DF.
5711 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5712 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5713 *
5714 * @param pVCpu The cross context virtual CPU structure.
5715 * @param pVmxTransient The VMX-transient structure.
5716 *
5717 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5718 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5719 * is due to an EPT violation, PML full or SPP-related event.
5720 *
5721 * @remarks No-long-jump zone!!!
5722 */
5723static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5724{
5725 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5726 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5727 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5728 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5729 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5730 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5731
5732 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5733 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5734 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5735 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5736 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5737 {
5738 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5739 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5740
5741 /*
5742 * If the event was a software interrupt (generated with INT n) or a software exception
5743 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5744     * can handle the VM-exit and continue guest execution, which re-executes the
5745     * instruction rather than re-injecting the exception. Re-injection can cause premature
5746     * trips to ring-3 before injection and involves TRPM, which currently has no way of
5747     * recording that these exceptions were caused by these instructions (ICEBP's #DB poses
5748     * the problem).
5749 */
5750 IEMXCPTRAISE enmRaise;
5751 IEMXCPTRAISEINFO fRaiseInfo;
5752 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5753 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5754 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5755 {
5756 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5757 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5758 }
5759 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5760 {
5761 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5762 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5763 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5764
5765 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5766 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5767
5768 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5769
5770 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5771 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5772 {
5773 pVmxTransient->fVectoringPF = true;
5774 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5775 }
5776 }
5777 else
5778 {
5779 /*
5780 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5781 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5782 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5783 */
5784 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5785 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5786 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5787 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5788 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5789 }
5790
5791 /*
5792 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5793 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5794 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5795 * subsequent VM-entry would fail, see @bugref{7445}.
5796 *
5797 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5798 */
5799 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5800 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5801 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5802 && CPUMIsGuestNmiBlocking(pVCpu))
5803 {
5804 CPUMSetGuestNmiBlocking(pVCpu, false);
5805 }
5806
5807 switch (enmRaise)
5808 {
5809 case IEMXCPTRAISE_CURRENT_XCPT:
5810 {
5811 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5812 Assert(rcStrict == VINF_SUCCESS);
5813 break;
5814 }
5815
5816 case IEMXCPTRAISE_PREV_EVENT:
5817 {
5818 uint32_t u32ErrCode;
5819 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5820 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5821 else
5822 u32ErrCode = 0;
5823
5824 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5825 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5826 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
5827 pVCpu->cpum.GstCtx.cr2);
5828
5829 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5830 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5831 Assert(rcStrict == VINF_SUCCESS);
5832 break;
5833 }
5834
5835 case IEMXCPTRAISE_REEXEC_INSTR:
5836 Assert(rcStrict == VINF_SUCCESS);
5837 break;
5838
5839 case IEMXCPTRAISE_DOUBLE_FAULT:
5840 {
5841 /*
5842                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5843                 * second #PF as a guest #PF (and not a shadow #PF), at which point it needs to be converted into a #DF.
5844 */
5845 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5846 {
5847 pVmxTransient->fVectoringDoublePF = true;
5848 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5849 pVCpu->cpum.GstCtx.cr2));
5850 rcStrict = VINF_SUCCESS;
5851 }
5852 else
5853 {
5854 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5855 vmxHCSetPendingXcptDF(pVCpu);
5856 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5857 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5858 rcStrict = VINF_HM_DOUBLE_FAULT;
5859 }
5860 break;
5861 }
5862
5863 case IEMXCPTRAISE_TRIPLE_FAULT:
5864 {
5865 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5866 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5867 rcStrict = VINF_EM_RESET;
5868 break;
5869 }
5870
5871 case IEMXCPTRAISE_CPU_HANG:
5872 {
5873 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5874 rcStrict = VERR_EM_GUEST_CPU_HANG;
5875 break;
5876 }
5877
5878 default:
5879 {
5880 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5881 rcStrict = VERR_VMX_IPE_2;
5882 break;
5883 }
5884 }
5885 }
5886 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5887 && !CPUMIsGuestNmiBlocking(pVCpu))
5888 {
5889 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5890 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5891 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5892 {
5893 /*
5894 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
5895 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5896 * that virtual NMIs remain blocked until the IRET execution is completed.
5897 *
5898 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5899 */
5900 CPUMSetGuestNmiBlocking(pVCpu, true);
5901 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5902 }
5903 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5904 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5905 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5906 {
5907 /*
5908 * Execution of IRET caused an EPT violation, page-modification log-full event or
5909 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5910 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5911 * that virtual NMIs remain blocked until the IRET execution is completed.
5912 *
5913 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5914 */
5915 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5916 {
5917 CPUMSetGuestNmiBlocking(pVCpu, true);
5918 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5919 }
5920 }
5921 }
5922
5923 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5924 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5925 return rcStrict;
5926}
5927
5928
5929#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5930/**
5931 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5932 * guest attempting to execute a VMX instruction.
5933 *
5934 * @returns Strict VBox status code (i.e. informational status codes too).
5935 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5936 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5937 *
5938 * @param pVCpu The cross context virtual CPU structure.
5939 * @param uExitReason The VM-exit reason.
5940 *
5941 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5942 * @remarks No-long-jump zone!!!
5943 */
5944static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5945{
5946 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5947 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5948
5949 /*
5950 * The physical CPU would have already checked the CPU mode/code segment.
5951 * We shall just assert here for paranoia.
5952 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5953 */
5954 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5955 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5956 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5957
5958 if (uExitReason == VMX_EXIT_VMXON)
5959 {
5960 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5961
5962 /*
5963 * We check CR4.VMXE because it is required to be always set while in VMX operation
5964 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5965 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5966 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5967 */
5968 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5969 {
5970 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5971 vmxHCSetPendingXcptUD(pVCpu);
5972 return VINF_HM_PENDING_XCPT;
5973 }
5974 }
5975 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5976 {
5977 /*
5978 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5979 * (other than VMXON), we need to raise a #UD.
5980 */
5981 Log4Func(("Not in VMX root mode -> #UD\n"));
5982 vmxHCSetPendingXcptUD(pVCpu);
5983 return VINF_HM_PENDING_XCPT;
5984 }
5985
5986 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5987 return VINF_SUCCESS;
5988}
5989
5990
5991/**
5992 * Decodes the memory operand of an instruction that caused a VM-exit.
5993 *
5994 * The Exit qualification field provides the displacement field for memory
5995 * operand instructions, if any.
5996 *
5997 * @returns Strict VBox status code (i.e. informational status codes too).
5998 * @retval VINF_SUCCESS if the operand was successfully decoded.
5999 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6000 * operand.
6001 * @param pVCpu The cross context virtual CPU structure.
6002 * @param uExitInstrInfo The VM-exit instruction information field.
6003 * @param GCPtrDisp The instruction displacement field, if any. For
6004 * RIP-relative addressing pass RIP + displacement here.
6005 * @param enmMemAccess The memory operand's access type (read or write).
6006 * @param pGCPtrMem Where to store the effective destination memory address.
6007 *
6008 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6009 * virtual-8086 mode hence skips those checks while verifying if the
6010 * segment is valid.
6011 */
6012static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6013 PRTGCPTR pGCPtrMem)
6014{
6015 Assert(pGCPtrMem);
6016 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6017 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6018 | CPUMCTX_EXTRN_CR0);
6019
6020 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6021 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6022 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6023
6024 VMXEXITINSTRINFO ExitInstrInfo;
6025 ExitInstrInfo.u = uExitInstrInfo;
6026 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6027 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6028 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6029 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6030 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6031 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6032 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6033 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6034 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6035
6036 /*
6037 * Validate instruction information.
6038     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6039 */
6040 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6041 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6042 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6043 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6044 AssertLogRelMsgReturn(fIsMemOperand,
6045 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6046
6047 /*
6048 * Compute the complete effective address.
6049 *
6050 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6051 * See AMD spec. 4.5.2 "Segment Registers".
6052 */
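    /* I.e. GCPtrMem = displacement + base + (index << scale); the segment base is then added
       (in long mode only for FS/GS) and the result is truncated to the instruction's address size. */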
6053 RTGCPTR GCPtrMem = GCPtrDisp;
6054 if (fBaseRegValid)
6055 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6056 if (fIdxRegValid)
6057 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6058
6059 RTGCPTR const GCPtrOff = GCPtrMem;
6060 if ( !fIsLongMode
6061 || iSegReg >= X86_SREG_FS)
6062 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6063 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6064
6065 /*
6066 * Validate effective address.
6067 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6068 */
6069 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6070 Assert(cbAccess > 0);
6071 if (fIsLongMode)
6072 {
6073 if (X86_IS_CANONICAL(GCPtrMem))
6074 {
6075 *pGCPtrMem = GCPtrMem;
6076 return VINF_SUCCESS;
6077 }
6078
6079 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6080 * "Data Limit Checks in 64-bit Mode". */
6081 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6082 vmxHCSetPendingXcptGP(pVCpu, 0);
6083 return VINF_HM_PENDING_XCPT;
6084 }
6085
6086 /*
6087 * This is a watered down version of iemMemApplySegment().
6088     * Parts that are not applicable to VMX instructions, such as real-or-v8086-mode
6089     * handling and segment CPL/DPL checks, are skipped.
6090 */
6091 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6092 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6093 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6094
6095 /* Check if the segment is present and usable. */
6096 if ( pSel->Attr.n.u1Present
6097 && !pSel->Attr.n.u1Unusable)
6098 {
6099 Assert(pSel->Attr.n.u1DescType);
6100 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6101 {
6102 /* Check permissions for the data segment. */
6103 if ( enmMemAccess == VMXMEMACCESS_WRITE
6104 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6105 {
6106 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6107 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6108 return VINF_HM_PENDING_XCPT;
6109 }
6110
6111 /* Check limits if it's a normal data segment. */
6112 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6113 {
6114 if ( GCPtrFirst32 > pSel->u32Limit
6115 || GCPtrLast32 > pSel->u32Limit)
6116 {
6117 Log4Func(("Data segment limit exceeded. "
6118 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6119 GCPtrLast32, pSel->u32Limit));
6120 if (iSegReg == X86_SREG_SS)
6121 vmxHCSetPendingXcptSS(pVCpu, 0);
6122 else
6123 vmxHCSetPendingXcptGP(pVCpu, 0);
6124 return VINF_HM_PENDING_XCPT;
6125 }
6126 }
6127 else
6128 {
6129 /* Check limits if it's an expand-down data segment.
6130 Note! The upper boundary is defined by the B bit, not the G bit! */
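            /* For expand-down segments the valid offsets are (limit, upper bound], so an offset at
               or below the limit is outside the segment. */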
6131 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6132 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6133 {
6134 Log4Func(("Expand-down data segment limit exceeded. "
6135 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6136 GCPtrLast32, pSel->u32Limit));
6137 if (iSegReg == X86_SREG_SS)
6138 vmxHCSetPendingXcptSS(pVCpu, 0);
6139 else
6140 vmxHCSetPendingXcptGP(pVCpu, 0);
6141 return VINF_HM_PENDING_XCPT;
6142 }
6143 }
6144 }
6145 else
6146 {
6147 /* Check permissions for the code segment. */
6148 if ( enmMemAccess == VMXMEMACCESS_WRITE
6149 || ( enmMemAccess == VMXMEMACCESS_READ
6150 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6151 {
6152 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6153 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6154 vmxHCSetPendingXcptGP(pVCpu, 0);
6155 return VINF_HM_PENDING_XCPT;
6156 }
6157
6158 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6159 if ( GCPtrFirst32 > pSel->u32Limit
6160 || GCPtrLast32 > pSel->u32Limit)
6161 {
6162 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6163 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6164 if (iSegReg == X86_SREG_SS)
6165 vmxHCSetPendingXcptSS(pVCpu, 0);
6166 else
6167 vmxHCSetPendingXcptGP(pVCpu, 0);
6168 return VINF_HM_PENDING_XCPT;
6169 }
6170 }
6171 }
6172 else
6173 {
6174 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6175 vmxHCSetPendingXcptGP(pVCpu, 0);
6176 return VINF_HM_PENDING_XCPT;
6177 }
6178
6179 *pGCPtrMem = GCPtrMem;
6180 return VINF_SUCCESS;
6181}
6182#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6183
6184
6185/**
6186 * VM-exit helper for LMSW.
6187 */
6188static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6189{
6190 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6191 AssertRCReturn(rc, rc);
6192
6193 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6194 AssertMsg( rcStrict == VINF_SUCCESS
6195 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6196
6197 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6198 if (rcStrict == VINF_IEM_RAISED_XCPT)
6199 {
6200 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6201 rcStrict = VINF_SUCCESS;
6202 }
6203
6204 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6205 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6206 return rcStrict;
6207}
6208
6209
6210/**
6211 * VM-exit helper for CLTS.
6212 */
6213static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6214{
6215 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6216 AssertRCReturn(rc, rc);
6217
6218 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6219 AssertMsg( rcStrict == VINF_SUCCESS
6220 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6221
6222 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6223 if (rcStrict == VINF_IEM_RAISED_XCPT)
6224 {
6225 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6226 rcStrict = VINF_SUCCESS;
6227 }
6228
6229 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6230 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6231 return rcStrict;
6232}
6233
6234
6235/**
6236 * VM-exit helper for MOV from CRx (CRx read).
6237 */
6238static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6239{
6240 Assert(iCrReg < 16);
6241 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6242
6243 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6244 AssertRCReturn(rc, rc);
6245
6246 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6247 AssertMsg( rcStrict == VINF_SUCCESS
6248 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6249
6250 if (iGReg == X86_GREG_xSP)
6251 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6252 else
6253 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6254#ifdef VBOX_WITH_STATISTICS
6255 switch (iCrReg)
6256 {
6257 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6258 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6259 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6260 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6261 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6262 }
6263#endif
6264 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6265 return rcStrict;
6266}
6267
6268
6269/**
6270 * VM-exit helper for MOV to CRx (CRx write).
6271 */
6272static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6273{
6274 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6275
6276 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6277 AssertMsg( rcStrict == VINF_SUCCESS
6278 || rcStrict == VINF_IEM_RAISED_XCPT
6279 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6280
6281 switch (iCrReg)
6282 {
6283 case 0:
6284 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6285 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6286 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6287 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6288 break;
6289
6290 case 2:
6291 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6292             /* Nothing to do here, CR2 is not part of the VMCS. */
6293 break;
6294
6295 case 3:
6296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6297 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6298 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6299 break;
6300
6301 case 4:
6302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6303 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6304#ifndef IN_NEM_DARWIN
6305 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6306 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6307#else
6308 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6309#endif
6310 break;
6311
6312 case 8:
6313 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6314 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6315 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6316 break;
6317
6318 default:
6319 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6320 break;
6321 }
6322
6323 if (rcStrict == VINF_IEM_RAISED_XCPT)
6324 {
6325 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6326 rcStrict = VINF_SUCCESS;
6327 }
6328 return rcStrict;
6329}
6330
6331
6332/**
6333 * VM-exit exception handler for \#PF (Page-fault exception).
6334 *
6335 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6336 */
6337static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6338{
6339 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6340 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6341
6342#ifndef IN_NEM_DARWIN
6343 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6344 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6345 { /* likely */ }
6346 else
6347#endif
6348 {
6349#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6350 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6351#endif
6352 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6353 if (!pVmxTransient->fVectoringDoublePF)
6354 {
6355 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6356 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6357 }
6358 else
6359 {
6360 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6361 Assert(!pVmxTransient->fIsNestedGuest);
6362 vmxHCSetPendingXcptDF(pVCpu);
6363 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6364 }
6365 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6366 return VINF_SUCCESS;
6367 }
6368
6369 Assert(!pVmxTransient->fIsNestedGuest);
6370
6371    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6372 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6373 if (pVmxTransient->fVectoringPF)
6374 {
6375 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6376 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6377 }
6378
6379 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6380 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6381 AssertRCReturn(rc, rc);
6382
6383 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6384 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6385
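    /* Hand the fault to PGM: VINF_SUCCESS means it was handled right here (e.g. a shadow page-table
       sync), while VINF_EM_RAW_GUEST_TRAP means it is a genuine guest #PF that gets reflected below. */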
6386 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6387 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6388
6389 Log4Func(("#PF: rc=%Rrc\n", rc));
6390 if (rc == VINF_SUCCESS)
6391 {
6392 /*
6393         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6394 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6395 */
6396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6397 TRPMResetTrap(pVCpu);
6398 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6399 return rc;
6400 }
6401
6402 if (rc == VINF_EM_RAW_GUEST_TRAP)
6403 {
6404 if (!pVmxTransient->fVectoringDoublePF)
6405 {
6406 /* It's a guest page fault and needs to be reflected to the guest. */
6407 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6408 TRPMResetTrap(pVCpu);
6409 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6410 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6411 uGstErrorCode, pVmxTransient->uExitQual);
6412 }
6413 else
6414 {
6415 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6416 TRPMResetTrap(pVCpu);
6417 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6418 vmxHCSetPendingXcptDF(pVCpu);
6419 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6420 }
6421
6422 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6423 return VINF_SUCCESS;
6424 }
6425
6426 TRPMResetTrap(pVCpu);
6427 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6428 return rc;
6429}
6430
6431
6432/**
6433 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6434 *
6435 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6436 */
6437static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6438{
6439 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6440 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6441
6442 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6443 AssertRCReturn(rc, rc);
6444
6445 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6446 {
6447 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6448 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6449
6450 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6451 * provides the VM-exit instruction length. If this causes problems later,
6452 * disassemble the instruction like it's done on AMD-V. */
6453 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6454 AssertRCReturn(rc2, rc2);
6455 return rc;
6456 }
6457
6458 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6459 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6460 return VINF_SUCCESS;
6461}
6462
6463
6464/**
6465 * VM-exit exception handler for \#BP (Breakpoint exception).
6466 *
6467 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6468 */
6469static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6470{
6471 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6472 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6473
6474 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6475 AssertRCReturn(rc, rc);
6476
6477 VBOXSTRICTRC rcStrict;
6478 if (!pVmxTransient->fIsNestedGuest)
6479 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6480 else
6481 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6482
6483 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6484 {
6485 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6486 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6487 rcStrict = VINF_SUCCESS;
6488 }
6489
6490 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6491 return rcStrict;
6492}
6493
6494
6495/**
6496 * VM-exit exception handler for \#AC (Alignment-check exception).
6497 *
6498 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6499 */
6500static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6501{
6502 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6503
6504 /*
6505     * Detect #ACs caused by the host having enabled split-lock detection.
6506 * Emulate such instructions.
6507 */
6508 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6509 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6510 AssertRCReturn(rc, rc);
6511 /** @todo detect split lock in cpu feature? */
6512 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6513 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6514 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6515 || CPUMGetGuestCPL(pVCpu) != 3
6516 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6517 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6518 {
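        /* At least one prerequisite of a genuine #AC (CR0.AM set, CPL 3, EFLAGS.AC set) is missing,
           so this #AC can only stem from the host's split-lock detection. */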
6519 /*
6520 * Check for debug/trace events and import state accordingly.
6521 */
6522 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6523 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6524 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6525#ifndef IN_NEM_DARWIN
6526 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6527#endif
6528 )
6529 {
6530 if (pVM->cCpus == 1)
6531 {
6532#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6533 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6534#else
6535 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6536#endif
6537 AssertRCReturn(rc, rc);
6538 }
6539 }
6540 else
6541 {
6542 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6543 AssertRCReturn(rc, rc);
6544
6545 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6546
6547 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6548 {
6549 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6550 if (rcStrict != VINF_SUCCESS)
6551 return rcStrict;
6552 }
6553 }
6554
6555 /*
6556 * Emulate the instruction.
6557 *
6558 * We have to ignore the LOCK prefix here as we must not retrigger the
6559 * detection on the host. This isn't all that satisfactory, though...
6560 */
6561 if (pVM->cCpus == 1)
6562 {
6563 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6564 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6565
6566 /** @todo For SMP configs we should do a rendezvous here. */
6567 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6568 if (rcStrict == VINF_SUCCESS)
6569#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6570 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6571 HM_CHANGED_GUEST_RIP
6572 | HM_CHANGED_GUEST_RFLAGS
6573 | HM_CHANGED_GUEST_GPRS_MASK
6574 | HM_CHANGED_GUEST_CS
6575 | HM_CHANGED_GUEST_SS);
6576#else
6577 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6578#endif
6579 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6580 {
6581 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6582 rcStrict = VINF_SUCCESS;
6583 }
6584 return rcStrict;
6585 }
6586 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6587 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6588 return VINF_EM_EMULATE_SPLIT_LOCK;
6589 }
6590
6591 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6592 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6593 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6594
6595 /* Re-inject it. We'll detect any nesting before getting here. */
6596 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6597 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6598 return VINF_SUCCESS;
6599}
6600
6601
6602/**
6603 * VM-exit exception handler for \#DB (Debug exception).
6604 *
6605 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6606 */
6607static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6608{
6609 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6610 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6611
6612 /*
6613 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
6614 */
6615 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6616
6617 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6618 uint64_t const uDR6 = X86_DR6_INIT_VAL
6619 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6620 | X86_DR6_BD | X86_DR6_BS));
6621
6622 int rc;
6623 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6624 if (!pVmxTransient->fIsNestedGuest)
6625 {
6626 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6627
6628 /*
6629 * Prevents stepping twice over the same instruction when the guest is stepping using
6630 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6631 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6632 */
6633 if ( rc == VINF_EM_DBG_STEPPED
6634 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6635 {
6636 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6637 rc = VINF_EM_RAW_GUEST_TRAP;
6638 }
6639 }
6640 else
6641 rc = VINF_EM_RAW_GUEST_TRAP;
6642 Log6Func(("rc=%Rrc\n", rc));
6643 if (rc == VINF_EM_RAW_GUEST_TRAP)
6644 {
6645 /*
6646 * The exception was for the guest. Update DR6, DR7.GD and
6647 * IA32_DEBUGCTL.LBR before forwarding it.
6648 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6649 */
6650#ifndef IN_NEM_DARWIN
6651 VMMRZCallRing3Disable(pVCpu);
6652 HM_DISABLE_PREEMPT(pVCpu);
6653
6654 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6655 pCtx->dr[6] |= uDR6;
6656 if (CPUMIsGuestDebugStateActive(pVCpu))
6657 ASMSetDR6(pCtx->dr[6]);
6658
6659 HM_RESTORE_PREEMPT();
6660 VMMRZCallRing3Enable(pVCpu);
6661#else
6662 /** @todo */
6663#endif
6664
6665 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6666 AssertRCReturn(rc, rc);
6667
6668 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6669 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6670
6671 /* Paranoia. */
6672 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6673 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6674
6675 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6676 AssertRC(rc);
6677
6678 /*
6679 * Raise #DB in the guest.
6680 *
6681 * It is important to reflect exactly what the VM-exit gave us (preserving the
6682 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6683 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6684 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6685 *
6686     * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
6687     * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6688 */
6689 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6690 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6691 return VINF_SUCCESS;
6692 }
6693
6694 /*
6695 * Not a guest trap, must be a hypervisor related debug event then.
6696 * Update DR6 in case someone is interested in it.
6697 */
6698 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6699 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6700 CPUMSetHyperDR6(pVCpu, uDR6);
6701
6702 return rc;
6703}
6704
6705
6706/**
6707 * Hacks its way around the lovely mesa driver's backdoor accesses.
6708 *
6709 * @sa hmR0SvmHandleMesaDrvGp.
6710 */
6711static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6712{
6713 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6714 RT_NOREF(pCtx);
6715
6716 /* For now we'll just skip the instruction. */
6717 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6718}
6719
6720
6721/**
6722 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6723 * backdoor logging w/o checking what it is running inside.
6724 *
6725 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6726 * backdoor port and magic numbers loaded in registers.
6727 *
6728 * @returns true if it is, false if it isn't.
6729 * @sa hmR0SvmIsMesaDrvGp.
6730 */
6731DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6732{
6733 /* 0xed: IN eAX,dx */
6734 uint8_t abInstr[1];
6735 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6736 return false;
6737
6738 /* Check that it is #GP(0). */
6739 if (pVmxTransient->uExitIntErrorCode != 0)
6740 return false;
6741
6742 /* Check magic and port. */
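    /* The magic 0x564d5868 is ASCII 'VMXh' and port 0x5658 is 'VX', i.e. the VMware-style backdoor
       interface the Mesa driver uses for its logging. */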
6743 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6744 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6745 if (pCtx->rax != UINT32_C(0x564d5868))
6746 return false;
6747 if (pCtx->dx != UINT32_C(0x5658))
6748 return false;
6749
6750 /* Flat ring-3 CS. */
6751 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6752 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6753 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6754 if (pCtx->cs.Attr.n.u2Dpl != 3)
6755 return false;
6756 if (pCtx->cs.u64Base != 0)
6757 return false;
6758
6759 /* Check opcode. */
6760 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6761 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6762 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6763 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6764 if (RT_FAILURE(rc))
6765 return false;
6766 if (abInstr[0] != 0xed)
6767 return false;
6768
6769 return true;
6770}
6771
6772
6773/**
6774 * VM-exit exception handler for \#GP (General-protection exception).
6775 *
6776 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6777 */
6778static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6779{
6780 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6781 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6782
6783 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6784 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6785#ifndef IN_NEM_DARWIN
6786 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6787 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6788 { /* likely */ }
6789 else
6790#endif
6791 {
6792#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6793# ifndef IN_NEM_DARWIN
6794 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6795# else
6796 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6797# endif
6798#endif
6799 /*
6800 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6801 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6802 */
6803 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6804 AssertRCReturn(rc, rc);
6805 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6806 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6807
6808 if ( pVmxTransient->fIsNestedGuest
6809 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6810 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6811 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6812 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6813 else
6814 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6815 return rc;
6816 }
6817
6818#ifndef IN_NEM_DARWIN
6819 Assert(CPUMIsGuestInRealModeEx(pCtx));
6820 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6821 Assert(!pVmxTransient->fIsNestedGuest);
6822
6823 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6824 AssertRCReturn(rc, rc);
6825
6826 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6827 if (rcStrict == VINF_SUCCESS)
6828 {
6829 if (!CPUMIsGuestInRealModeEx(pCtx))
6830 {
6831 /*
6832 * The guest is no longer in real-mode, check if we can continue executing the
6833 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6834 */
6835 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6836 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6837 {
6838 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6839 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6840 }
6841 else
6842 {
6843 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6844 rcStrict = VINF_EM_RESCHEDULE;
6845 }
6846 }
6847 else
6848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6849 }
6850 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6851 {
6852 rcStrict = VINF_SUCCESS;
6853 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6854 }
6855 return VBOXSTRICTRC_VAL(rcStrict);
6856#endif
6857}
6858
6859
6860/**
6861 * VM-exit exception handler for \#DE (Divide Error).
6862 *
6863 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6864 */
6865static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6866{
6867 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6868 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6869
6870 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6871 AssertRCReturn(rc, rc);
6872
6873 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
6874 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6875 {
6876 uint8_t cbInstr = 0;
6877 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6878 if (rc2 == VINF_SUCCESS)
6879 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6880 else if (rc2 == VERR_NOT_FOUND)
6881 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6882 else
6883 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6884 }
6885 else
6886 rcStrict = VINF_SUCCESS; /* Do nothing. */
6887
6888 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6889 if (RT_FAILURE(rcStrict))
6890 {
6891 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6892 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6893 rcStrict = VINF_SUCCESS;
6894 }
6895
6896 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6897 return VBOXSTRICTRC_VAL(rcStrict);
6898}
6899
6900
6901/**
6902 * VM-exit exception handler wrapper for all other exceptions that are not handled
6903 * by a specific handler.
6904 *
6905 * This simply re-injects the exception back into the VM without any special
6906 * processing.
6907 *
6908 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6909 */
6910static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6911{
6912 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6913
6914#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6915# ifndef IN_NEM_DARWIN
6916 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6917 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6918 ("uVector=%#x u32XcptBitmap=%#X32\n",
6919 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6920 NOREF(pVmcsInfo);
6921# endif
6922#endif
6923
6924 /*
6925 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6926 * would have been handled while checking exits due to event delivery.
6927 */
6928 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6929
6930#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6931 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6932 AssertRCReturn(rc, rc);
6933 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6934#endif
6935
6936#ifdef VBOX_WITH_STATISTICS
6937 switch (uVector)
6938 {
6939 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6940 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6941 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6942 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6943 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6944 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6945 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6946 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6947 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6948 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6949 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6950 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6951 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6952 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6953 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6954 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6955 default:
6956 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6957 break;
6958 }
6959#endif
6960
6961    /* We should never call this function for a page-fault; we'd otherwise need to pass on the fault address below. */
6962 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6963 NOREF(uVector);
6964
6965 /* Re-inject the original exception into the guest. */
6966 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6967 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6968 return VINF_SUCCESS;
6969}
6970
6971
6972/**
6973 * VM-exit exception handler for all exceptions (except NMIs!).
6974 *
6975 * @remarks This may be called for both guests and nested-guests. Take care to not
6976 * make assumptions and avoid doing anything that is not relevant when
6977 * executing a nested-guest (e.g., Mesa driver hacks).
6978 */
6979static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6980{
6981 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6982
6983 /*
6984 * If this VM-exit occurred while delivering an event through the guest IDT, take
6985 * action based on the return code and additional hints (e.g. for page-faults)
6986 * that will be updated in the VMX transient structure.
6987 */
6988 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6989 if (rcStrict == VINF_SUCCESS)
6990 {
6991 /*
6992 * If an exception caused a VM-exit due to delivery of an event, the original
6993 * event may have to be re-injected into the guest. We shall reinject it and
6994 * continue guest execution. However, page-fault is a complicated case and
6995 * needs additional processing done in vmxHCExitXcptPF().
6996 */
6997 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6998 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6999 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7000 || uVector == X86_XCPT_PF)
7001 {
7002 switch (uVector)
7003 {
7004 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7005 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7006 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7007 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7008 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7009 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7010 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7011 default:
7012 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7013 }
7014 }
7015 /* else: inject pending event before resuming guest execution. */
7016 }
7017 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7018 {
7019 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7020 rcStrict = VINF_SUCCESS;
7021 }
7022
7023 return rcStrict;
7024}
7025/** @} */
7026
7027
7028/** @name VM-exit handlers.
7029 * @{
7030 */
7031/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7032/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7033/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7034
7035/**
7036 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7037 */
7038HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7039{
7040 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7041 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7042
7043#ifndef IN_NEM_DARWIN
7044 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7045 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7046 return VINF_SUCCESS;
7047 return VINF_EM_RAW_INTERRUPT;
7048#else
7049 return VINF_SUCCESS;
7050#endif
7051}
7052
7053
7054/**
7055 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7056 * VM-exit.
7057 */
7058HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7059{
7060 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7061 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7062
7063 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7064
7065 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7066 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7067 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7068
7069 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7070 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7071 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7072 NOREF(pVmcsInfo);
7073
7074 VBOXSTRICTRC rcStrict;
7075 switch (uExitIntType)
7076 {
7077#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7078 /*
7079 * Host physical NMIs:
7080 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7081 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7082 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7083 *
7084 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7085 * See Intel spec. 27.5.5 "Updating Non-Register State".
7086 */
7087 case VMX_EXIT_INT_INFO_TYPE_NMI:
7088 {
7089 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7090 break;
7091 }
7092#endif
7093
7094 /*
7095 * Privileged software exceptions (#DB from ICEBP),
7096 * Software exceptions (#BP and #OF),
7097 * Hardware exceptions:
7098 * Process the required exceptions and resume guest execution if possible.
7099 */
7100 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7101 Assert(uVector == X86_XCPT_DB);
7102 RT_FALL_THRU();
7103 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7104 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7105 RT_FALL_THRU();
7106 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7107 {
7108 NOREF(uVector);
7109 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7110 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7111 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7112 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7113
7114 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7115 break;
7116 }
7117
7118 default:
7119 {
7120 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7121 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7122 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7123 break;
7124 }
7125 }
7126
7127 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7128 return rcStrict;
7129}
7130
7131
7132/**
7133 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7134 */
7135HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7136{
7137 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7138
7139    /* The guest is now ready to receive interrupts; indicate that we no longer need the interrupt-window VM-exit. */
7140 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7141 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7142
7143 /* Evaluate and deliver pending events and resume guest execution. */
7144 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7145 return VINF_SUCCESS;
7146}
7147
7148
7149/**
7150 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7151 */
7152HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7153{
7154 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7155
7156 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7157 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7158 {
7159 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7160 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7161 }
7162
7163 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7164
7165 /*
7166 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7167 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7168 */
7169 uint32_t fIntrState;
7170 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7171 AssertRC(rc);
7172 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7173 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7174 {
7175 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7176 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7177
7178 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7179 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7180 AssertRC(rc);
7181 }
7182
7183    /* The guest is now ready to receive NMIs; indicate that we no longer need the NMI-window VM-exit. */
7184 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7185
7186 /* Evaluate and deliver pending events and resume guest execution. */
7187 return VINF_SUCCESS;
7188}
7189
7190
7191/**
7192 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7193 */
7194HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7195{
7196 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7197 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7198}
7199
7200
7201/**
7202 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7203 */
7204HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7205{
7206 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7207 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7208}
7209
7210
7211/**
7212 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7213 */
7214HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7215{
7216 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7217
7218 /*
7219 * Get the state we need and update the exit history entry.
7220 */
7221 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7222 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7223
7224 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7225 AssertRCReturn(rc, rc);
7226
7227 VBOXSTRICTRC rcStrict;
7228 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7229 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7230 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
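    /*
     * A non-NULL exit record means EM wants to probe or optimize this RIP (frequent exits),
     * in which case the work is handed to EMHistoryExec below; otherwise the CPUID
     * instruction is simply emulated via IEM.
     */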
7231 if (!pExitRec)
7232 {
7233 /*
7234 * Regular CPUID instruction execution.
7235 */
7236 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7237 if (rcStrict == VINF_SUCCESS)
7238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7239 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7240 {
7241 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7242 rcStrict = VINF_SUCCESS;
7243 }
7244 }
7245 else
7246 {
7247 /*
7248 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7249 */
7250 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7251 AssertRCReturn(rc2, rc2);
7252
7253 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7254 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7255
7256 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7257 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7258
7259 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7260 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7261 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7262 }
7263 return rcStrict;
7264}
7265
7266
7267/**
7268 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7269 */
7270HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7271{
7272 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7273
7274 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7275 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7276 AssertRCReturn(rc, rc);
7277
7278 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7279 return VINF_EM_RAW_EMULATE_INSTR;
7280
7281 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7282 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7283}
7284
7285
7286/**
7287 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7288 */
7289HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7290{
7291 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7292
7293 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7294 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7295 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7296 AssertRCReturn(rc, rc);
7297
7298 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7299 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7300 {
7301 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7302 we must reset offsetting on VM-entry. See @bugref{6634}. */
7303 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7304 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7305 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7306 }
7307 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7308 {
7309 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7310 rcStrict = VINF_SUCCESS;
7311 }
7312 return rcStrict;
7313}
7314
7315
7316/**
7317 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7318 */
7319HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7320{
7321 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7322
7323 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7324 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7325 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7326 AssertRCReturn(rc, rc);
7327
7328 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7329 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7330 {
7331 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7332 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7333 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7334 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7335 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7336 }
7337 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7338 {
7339 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7340 rcStrict = VINF_SUCCESS;
7341 }
7342 return rcStrict;
7343}
7344
7345
7346/**
7347 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7348 */
7349HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7350{
7351 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7352
7353 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7354 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7355 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7356 AssertRCReturn(rc, rc);
7357
7358 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7359 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7360 if (RT_LIKELY(rc == VINF_SUCCESS))
7361 {
7362 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7363 Assert(pVmxTransient->cbExitInstr == 2);
7364 }
7365 else
7366 {
7367 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7368 rc = VERR_EM_INTERPRETER;
7369 }
7370 return rc;
7371}
7372
7373
7374/**
7375 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7376 */
7377HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7378{
7379 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7380
7381 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7382 if (EMAreHypercallInstructionsEnabled(pVCpu))
7383 {
7384 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7385 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7386 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7387 AssertRCReturn(rc, rc);
7388
7389 /* Perform the hypercall. */
7390 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7391 if (rcStrict == VINF_SUCCESS)
7392 {
7393 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7394 AssertRCReturn(rc, rc);
7395 }
7396 else
7397 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7398 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7399 || RT_FAILURE(rcStrict));
7400
7401 /* If the hypercall changes anything other than guest's general-purpose registers,
7402 we would need to reload the guest changed bits here before VM-entry. */
7403 }
7404 else
7405 Log4Func(("Hypercalls not enabled\n"));
7406
7407 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7408 if (RT_FAILURE(rcStrict))
7409 {
7410 vmxHCSetPendingXcptUD(pVCpu);
7411 rcStrict = VINF_SUCCESS;
7412 }
7413
7414 return rcStrict;
7415}
7416
7417
7418/**
7419 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7420 */
7421HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7422{
7423 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7424#ifndef IN_NEM_DARWIN
7425 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7426#endif
7427
7428 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7429 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7430 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7431 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7432 AssertRCReturn(rc, rc);
7433
7434 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7435
7436 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7437 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7438 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7439 {
7440 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7441 rcStrict = VINF_SUCCESS;
7442 }
7443 else
7444 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7445 VBOXSTRICTRC_VAL(rcStrict)));
7446 return rcStrict;
7447}
7448
7449
7450/**
7451 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7452 */
7453HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7454{
7455 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7456
7457 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7458 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7459 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7460 AssertRCReturn(rc, rc);
7461
7462 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7463 if (rcStrict == VINF_SUCCESS)
7464 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7465 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7466 {
7467 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7468 rcStrict = VINF_SUCCESS;
7469 }
7470
7471 return rcStrict;
7472}
7473
7474
7475/**
7476 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7477 */
7478HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7479{
7480 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7481
7482 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7483 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7484 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7485 AssertRCReturn(rc, rc);
7486
7487 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7488 if (RT_SUCCESS(rcStrict))
7489 {
7490 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7491 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7492 rcStrict = VINF_SUCCESS;
7493 }
7494
7495 return rcStrict;
7496}
7497
7498
7499/**
7500 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7501 * VM-exit.
7502 */
7503HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7504{
7505 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7506 return VINF_EM_RESET;
7507}
7508
7509
7510/**
7511 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7512 */
7513HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7514{
7515 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7516
7517 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7518 AssertRCReturn(rc, rc);
7519
7520 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7521 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7522 rc = VINF_SUCCESS;
7523 else
7524 rc = VINF_EM_HALT;
7525
7526 if (rc != VINF_SUCCESS)
7527 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7528 return rc;
7529}
7530
7531
7532#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7533/**
7534 * VM-exit handler for instructions that result in a \#UD exception delivered to
7535 * the guest.
7536 */
7537HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7538{
7539 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7540 vmxHCSetPendingXcptUD(pVCpu);
7541 return VINF_SUCCESS;
7542}
7543#endif
7544
7545
7546/**
7547 * VM-exit handler for expiry of the VMX-preemption timer.
7548 */
7549HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7550{
7551 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7552
7553 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7554 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7555    Log12(("vmxHCExitPreemptTimer:\n"));
7556
7557 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7558 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7559 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7560 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7561 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7562}
7563
7564
7565/**
7566 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7567 */
7568HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7569{
7570 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7571
7572 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7573 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7574 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7575 AssertRCReturn(rc, rc);
7576
7577 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7578 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7579 : HM_CHANGED_RAISED_XCPT_MASK);
7580
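    /*
     * If the guest's XCR0 now differs from the host's (and CR4.OSXSAVE is set), update the
     * cached flag and refresh the start-VM function selection so that XCR0 gets loaded/saved
     * around guest execution as needed (hardware-assisted R0 builds only).
     */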
7581#ifndef IN_NEM_DARWIN
7582 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7583 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7584 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7585 {
7586 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7587 hmR0VmxUpdateStartVmFunction(pVCpu);
7588 }
7589#endif
7590
7591 return rcStrict;
7592}
7593
7594
7595/**
7596 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7597 */
7598HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7599{
7600 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7601
7602     /** @todo Enable the new code after finding a reliable guest test-case. */
7603#if 1
7604 return VERR_EM_INTERPRETER;
7605#else
7606 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7607 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7608 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7609 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7610 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7611 AssertRCReturn(rc, rc);
7612
7613 /* Paranoia. Ensure this has a memory operand. */
7614 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7615
7616 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7617 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7618 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7619 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7620
7621 RTGCPTR GCPtrDesc;
7622 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7623
7624 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7625 GCPtrDesc, uType);
7626 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7627 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7628 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7629 {
7630 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7631 rcStrict = VINF_SUCCESS;
7632 }
7633 return rcStrict;
7634#endif
7635}
7636
7637
7638/**
7639 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7640 * VM-exit.
7641 */
7642HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7643{
7644 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7645 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7646 AssertRCReturn(rc, rc);
7647
7648 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7649 if (RT_FAILURE(rc))
7650 return rc;
7651
7652 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7653 NOREF(uInvalidReason);
7654
7655#ifdef VBOX_STRICT
7656 uint32_t fIntrState;
7657 uint64_t u64Val;
7658 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7659 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7660 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7661
7662 Log4(("uInvalidReason %u\n", uInvalidReason));
7663 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7664 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7665 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7666
7667 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7668 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7669 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7670 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7671 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7672 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7673 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7674    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7675 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7676 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7677 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7678 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7679# ifndef IN_NEM_DARWIN
7680 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7681 {
7682 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7683 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7684 }
7685
7686 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7687# endif
7688#endif
7689
7690 return VERR_VMX_INVALID_GUEST_STATE;
7691}
7692
7693/**
7694 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7695 */
7696HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7697{
7698 /*
7699 * Cumulative notes of all recognized but unexpected VM-exits.
7700 *
7701 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7702 * nested-paging is used.
7703 *
7704     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7705     *    emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7706     *    this function (and thereby stopping VM execution) for handling such instructions.
7707 *
7708 *
7709 * VMX_EXIT_INIT_SIGNAL:
7710 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7711 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7712     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7713 *
7714     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7715 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7716 * See Intel spec. "23.8 Restrictions on VMX operation".
7717 *
7718 * VMX_EXIT_SIPI:
7719 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7720 * activity state is used. We don't make use of it as our guests don't have direct
7721 * access to the host local APIC.
7722 *
7723 * See Intel spec. 25.3 "Other Causes of VM-exits".
7724 *
7725 * VMX_EXIT_IO_SMI:
7726 * VMX_EXIT_SMI:
7727 * This can only happen if we support dual-monitor treatment of SMI, which can be
7728 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7729 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7730 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7731 *
7732 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7733 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7734 *
7735 * VMX_EXIT_ERR_MSR_LOAD:
7736     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7737     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
7738     *    execution.
7739 *
7740 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7741 *
7742 * VMX_EXIT_ERR_MACHINE_CHECK:
7743     *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7744     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
7745     *    abort-class #MC exception is raised. We thus cannot assume a
7746     *    reasonable chance of continuing any sort of execution and we bail.
7747 *
7748 * See Intel spec. 15.1 "Machine-check Architecture".
7749 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7750 *
7751 * VMX_EXIT_PML_FULL:
7752 * VMX_EXIT_VIRTUALIZED_EOI:
7753 * VMX_EXIT_APIC_WRITE:
7754 * We do not currently support any of these features and thus they are all unexpected
7755 * VM-exits.
7756 *
7757 * VMX_EXIT_GDTR_IDTR_ACCESS:
7758 * VMX_EXIT_LDTR_TR_ACCESS:
7759 * VMX_EXIT_RDRAND:
7760 * VMX_EXIT_RSM:
7761 * VMX_EXIT_VMFUNC:
7762 * VMX_EXIT_ENCLS:
7763 * VMX_EXIT_RDSEED:
7764 * VMX_EXIT_XSAVES:
7765 * VMX_EXIT_XRSTORS:
7766 * VMX_EXIT_UMWAIT:
7767 * VMX_EXIT_TPAUSE:
7768 * VMX_EXIT_LOADIWKEY:
7769 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7770     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
7771 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7772 *
7773 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7774 */
7775 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7776 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7777 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7778}
7779
7780
7781/**
7782 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7783 */
7784HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7785{
7786 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7787
7788 /** @todo Optimize this: We currently drag in the whole MSR state
7789 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7790     * the MSRs required. That would require changes to IEM and possibly CPUM too.
7791     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7792 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7793 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7794 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
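    /* The FS and GS base MSRs are not part of the all-MSRs mask above; for those we must also
       import the full segment registers (see the matching note in the WRMSR handler below). */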
7795 switch (idMsr)
7796 {
7797 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7798 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7799 }
7800
7801 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7802 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7803 AssertRCReturn(rc, rc);
7804
7805 Log4Func(("ecx=%#RX32\n", idMsr));
7806
7807#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7808 Assert(!pVmxTransient->fIsNestedGuest);
7809 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7810 {
7811 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7812 && idMsr != MSR_K6_EFER)
7813 {
7814 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7815 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7816 }
7817 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7818 {
7819 Assert(pVmcsInfo->pvMsrBitmap);
7820 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7821 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7822 {
7823 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7824 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7825 }
7826 }
7827 }
7828#endif
7829
7830 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7831 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7832 if (rcStrict == VINF_SUCCESS)
7833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7834 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7835 {
7836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7837 rcStrict = VINF_SUCCESS;
7838 }
7839 else
7840 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7841 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7842
7843 return rcStrict;
7844}
7845
7846
7847/**
7848 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7849 */
7850HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7851{
7852 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7853
7854 /** @todo Optimize this: We currently drag in the whole MSR state
7855 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7856     * the MSRs required. That would require changes to IEM and possibly CPUM too.
7857     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7858 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7859 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7860
7861 /*
7862 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7863     * Although we don't need to fetch the base (it will be overwritten shortly), when
7864     * loading the guest state we also load the entire segment register, including the
7865     * limit and attributes, and thus we need to import them here.
7866 */
7867 switch (idMsr)
7868 {
7869 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7870 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7871 }
7872
7873 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7874 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7875 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7876 AssertRCReturn(rc, rc);
7877
7878 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7879
7880 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7881 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7882
7883 if (rcStrict == VINF_SUCCESS)
7884 {
7885 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7886
7887 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7888 if ( idMsr == MSR_IA32_APICBASE
7889 || ( idMsr >= MSR_IA32_X2APIC_START
7890 && idMsr <= MSR_IA32_X2APIC_END))
7891 {
7892 /*
7893             * We've already saved the APIC-related guest state (TPR) in the post-run phase.
7894 * When full APIC register virtualization is implemented we'll have to make
7895 * sure APIC state is saved from the VMCS before IEM changes it.
7896 */
7897 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7898 }
7899 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7900 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7901 else if (idMsr == MSR_K6_EFER)
7902 {
7903 /*
7904 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7905 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7906 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7907 */
7908 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7909 }
7910
7911 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7912 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7913 {
7914 switch (idMsr)
7915 {
7916 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7917 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7918 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7919 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7920 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7921 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7922 default:
7923 {
7924#ifndef IN_NEM_DARWIN
7925 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7927 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7928 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7929#else
7930 AssertMsgFailed(("TODO\n"));
7931#endif
7932 break;
7933 }
7934 }
7935 }
7936#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7937 else
7938 {
7939 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7940 switch (idMsr)
7941 {
7942 case MSR_IA32_SYSENTER_CS:
7943 case MSR_IA32_SYSENTER_EIP:
7944 case MSR_IA32_SYSENTER_ESP:
7945 case MSR_K8_FS_BASE:
7946 case MSR_K8_GS_BASE:
7947 {
7948 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7949 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7950 }
7951
7952 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7953 default:
7954 {
7955 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7956 {
7957 /* EFER MSR writes are always intercepted. */
7958 if (idMsr != MSR_K6_EFER)
7959 {
7960 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7961 idMsr));
7962 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7963 }
7964 }
7965
7966 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7967 {
7968 Assert(pVmcsInfo->pvMsrBitmap);
7969 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7970 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7971 {
7972 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7973 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7974 }
7975 }
7976 break;
7977 }
7978 }
7979 }
7980#endif /* VBOX_STRICT */
7981 }
7982 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7983 {
7984 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7985 rcStrict = VINF_SUCCESS;
7986 }
7987 else
7988 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7989 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7990
7991 return rcStrict;
7992}
7993
7994
7995/**
7996 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7997 */
7998HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7999{
8000 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8001
8002     /** @todo The guest has likely hit a contended spinlock. We might want to
8003      *        poke or schedule a different guest VCPU. */
8004 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8005 if (RT_SUCCESS(rc))
8006 return VINF_EM_RAW_INTERRUPT;
8007
8008 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8009 return rc;
8010}
8011
8012
8013/**
8014 * VM-exit handler for when the TPR value is lowered below the specified
8015 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8016 */
8017HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8018{
8019 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8020 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8021
8022 /*
8023 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8024 * We'll re-evaluate pending interrupts and inject them before the next VM
8025 * entry so we can just continue execution here.
8026 */
8027 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8028 return VINF_SUCCESS;
8029}
8030
8031
8032/**
8033 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8034 * VM-exit.
8035 *
8036 * @retval VINF_SUCCESS when guest execution can continue.
8037 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8038 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8039 * incompatible guest state for VMX execution (real-on-v86 case).
8040 */
8041HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8042{
8043 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8044 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8045
8046 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8047 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8048 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8049
8050 VBOXSTRICTRC rcStrict;
8051 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8052 uint64_t const uExitQual = pVmxTransient->uExitQual;
8053 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
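    /* The CR-access exit qualification distinguishes MOV-to-CRx, MOV-from-CRx, CLTS and LMSW
       accesses; each is handled by a dedicated case below. */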
8054 switch (uAccessType)
8055 {
8056 /*
8057 * MOV to CRx.
8058 */
8059 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8060 {
8061 /*
8062 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8063 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8064 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8065 * PAE PDPTEs as well.
8066 */
8067 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8068 AssertRCReturn(rc, rc);
8069
8070 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8071#ifndef IN_NEM_DARWIN
8072 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8073#endif
8074 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8075 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8076
8077 /*
8078             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8079 * - When nested paging isn't used.
8080 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8081 * - We are executing in the VM debug loop.
8082 */
8083#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8084# ifndef IN_NEM_DARWIN
8085 Assert( iCrReg != 3
8086 || !VM_IS_VMX_NESTED_PAGING(pVM)
8087 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8088 || pVCpu->hmr0.s.fUsingDebugLoop);
8089# else
8090 Assert( iCrReg != 3
8091 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8092# endif
8093#endif
8094
8095 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8096 Assert( iCrReg != 8
8097 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8098
8099 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8100 AssertMsg( rcStrict == VINF_SUCCESS
8101 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8102
8103#ifndef IN_NEM_DARWIN
8104 /*
8105 * This is a kludge for handling switches back to real mode when we try to use
8106             * V86 mode to run real-mode code directly. The problem is that V86 mode cannot
8107             * deal with special selector values, so we have to return to ring-3 and run
8108             * there until the selector values are V86-mode compatible.
8109 *
8110 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8111 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8112 * this function.
8113 */
8114 if ( iCrReg == 0
8115 && rcStrict == VINF_SUCCESS
8116 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8117 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8118 && (uOldCr0 & X86_CR0_PE)
8119 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8120 {
8121 /** @todo Check selectors rather than returning all the time. */
8122 Assert(!pVmxTransient->fIsNestedGuest);
8123 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8124 rcStrict = VINF_EM_RESCHEDULE_REM;
8125 }
8126#endif
8127
8128 break;
8129 }
8130
8131 /*
8132 * MOV from CRx.
8133 */
8134 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8135 {
8136 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8137 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8138
8139 /*
8140             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8141 * - When nested paging isn't used.
8142 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8143 * - We are executing in the VM debug loop.
8144 */
8145#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8146# ifndef IN_NEM_DARWIN
8147 Assert( iCrReg != 3
8148 || !VM_IS_VMX_NESTED_PAGING(pVM)
8149 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8150 || pVCpu->hmr0.s.fLeaveDone);
8151# else
8152 Assert( iCrReg != 3
8153 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8154# endif
8155#endif
8156
8157 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8158 Assert( iCrReg != 8
8159 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8160
8161 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8162 break;
8163 }
8164
8165 /*
8166 * CLTS (Clear Task-Switch Flag in CR0).
8167 */
8168 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8169 {
8170 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8171 break;
8172 }
8173
8174 /*
8175 * LMSW (Load Machine-Status Word into CR0).
8176 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8177 */
8178 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8179 {
8180 RTGCPTR GCPtrEffDst;
8181 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8182 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8183 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8184 if (fMemOperand)
8185 {
8186 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8187 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8188 }
8189 else
8190 GCPtrEffDst = NIL_RTGCPTR;
8191 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8192 break;
8193 }
8194
8195 default:
8196 {
8197 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8198 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8199 }
8200 }
8201
8202 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8203 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8204 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8205
8206 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8207 NOREF(pVM);
8208 return rcStrict;
8209}
8210
8211
8212/**
8213 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8214 * VM-exit.
8215 */
8216HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8217{
8218 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8219 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8220
8221 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8222 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8223 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8224 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8225 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8226 | CPUMCTX_EXTRN_EFER);
8227    /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8228 AssertRCReturn(rc, rc);
8229
8230    /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8231 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8232 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8233 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8234 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8235 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8236 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8237 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8238
8239 /*
8240 * Update exit history to see if this exit can be optimized.
8241 */
8242 VBOXSTRICTRC rcStrict;
8243 PCEMEXITREC pExitRec = NULL;
8244 if ( !fGstStepping
8245 && !fDbgStepping)
8246 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8247 !fIOString
8248 ? !fIOWrite
8249 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8250 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8251 : !fIOWrite
8252 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8253 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8254 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8255 if (!pExitRec)
8256 {
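        /* These tables are indexed by the I/O size encoding of the exit qualification
           (0 = byte, 1 = word, 3 = dword); index 2 is unused, see the AssertReturn above. */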
8257 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8258 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8259
8260 uint32_t const cbValue = s_aIOSizes[uIOSize];
8261 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8262 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8263 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8264 if (fIOString)
8265 {
8266 /*
8267 * INS/OUTS - I/O String instruction.
8268 *
8269 * Use instruction-information if available, otherwise fall back on
8270 * interpreting the instruction.
8271 */
8272 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8273 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8274 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8275 if (fInsOutsInfo)
8276 {
8277 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8278 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8279 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8280 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8281 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8282 if (fIOWrite)
8283 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8284 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8285 else
8286 {
8287 /*
8288 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8289 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8290 * See Intel Instruction spec. for "INS".
8291 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8292 */
8293 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8294 }
8295 }
8296 else
8297 rcStrict = IEMExecOne(pVCpu);
8298
8299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8300 fUpdateRipAlready = true;
8301 }
8302 else
8303 {
8304 /*
8305 * IN/OUT - I/O instruction.
8306 */
8307 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8308 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8309 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8310 if (fIOWrite)
8311 {
8312 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8313 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8314#ifndef IN_NEM_DARWIN
8315 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8316 && !pCtx->eflags.Bits.u1TF)
8317 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8318#endif
8319 }
8320 else
8321 {
8322 uint32_t u32Result = 0;
8323 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8324 if (IOM_SUCCESS(rcStrict))
8325 {
8326 /* Save result of I/O IN instr. in AL/AX/EAX. */
8327 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8328 }
8329#ifndef IN_NEM_DARWIN
8330 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8331 && !pCtx->eflags.Bits.u1TF)
8332 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8333#endif
8334 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8335 }
8336 }
8337
8338 if (IOM_SUCCESS(rcStrict))
8339 {
8340 if (!fUpdateRipAlready)
8341 {
8342 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8343 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8344 }
8345
8346 /*
8347         * INS/OUTS with a REP prefix updates RFLAGS; this can be observed with a triple-fault
8348         * guru while booting a Fedora 17 64-bit guest.
8349 *
8350 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8351 */
8352 if (fIOString)
8353 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8354
8355 /*
8356 * If any I/O breakpoints are armed, we need to check if one triggered
8357 * and take appropriate action.
8358 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8359 */
8360 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8361 AssertRCReturn(rc, rc);
8362
8363 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8364 * execution engines about whether hyper BPs and such are pending. */
8365 uint32_t const uDr7 = pCtx->dr[7];
8366 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8367 && X86_DR7_ANY_RW_IO(uDr7)
8368 && (pCtx->cr4 & X86_CR4_DE))
8369 || DBGFBpIsHwIoArmed(pVM)))
8370 {
8371 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8372
8373#ifndef IN_NEM_DARWIN
8374 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8375 VMMRZCallRing3Disable(pVCpu);
8376 HM_DISABLE_PREEMPT(pVCpu);
8377
8378 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8379
8380 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8381 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8382 {
8383 /* Raise #DB. */
8384 if (fIsGuestDbgActive)
8385 ASMSetDR6(pCtx->dr[6]);
8386 if (pCtx->dr[7] != uDr7)
8387 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8388
8389 vmxHCSetPendingXcptDB(pVCpu);
8390 }
8391 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8392 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8393 else if ( rcStrict2 != VINF_SUCCESS
8394 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8395 rcStrict = rcStrict2;
8396 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8397
8398 HM_RESTORE_PREEMPT();
8399 VMMRZCallRing3Enable(pVCpu);
8400#else
8401 /** @todo */
8402#endif
8403 }
8404 }
8405
8406#ifdef VBOX_STRICT
8407 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8408 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8409 Assert(!fIOWrite);
8410 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8411 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8412 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8413 Assert(fIOWrite);
8414 else
8415 {
8416# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8417 * statuses, that the VMM device and some others may return. See
8418 * IOM_SUCCESS() for guidance. */
8419 AssertMsg( RT_FAILURE(rcStrict)
8420 || rcStrict == VINF_SUCCESS
8421 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8422 || rcStrict == VINF_EM_DBG_BREAKPOINT
8423 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8424 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8425# endif
8426 }
8427#endif
8428 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8429 }
8430 else
8431 {
8432 /*
8433 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8434 */
8435 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8436 AssertRCReturn(rc2, rc2);
8437 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8438 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8439 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8440 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8441 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8442 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8443
8444 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8445 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8446
8447 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8448 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8449 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8450 }
8451 return rcStrict;
8452}
8453
8454
8455/**
8456 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8457 * VM-exit.
8458 */
8459HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8460{
8461 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8462
8463    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8464 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8465 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8466 {
8467 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8468 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8469 {
8470 uint32_t uErrCode;
8471 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8472 {
8473 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8474 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8475 }
8476 else
8477 uErrCode = 0;
8478
8479 RTGCUINTPTR GCPtrFaultAddress;
8480 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8481 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8482 else
8483 GCPtrFaultAddress = 0;
8484
8485 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8486
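            /* Re-queue the original event (with its error code and fault address, if any) and
               return VINF_EM_RAW_INJECT_TRPM_EVENT so the task switch gets emulated together
               with the event injection. */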
8487 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8488 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8489
8490 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8491 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8492 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8493 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8494 }
8495 }
8496
8497 /* Fall back to the interpreter to emulate the task-switch. */
8498 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8499 return VERR_EM_INTERPRETER;
8500}
8501
8502
8503/**
8504 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8505 */
8506HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8507{
8508 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8509
8510 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8511 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8512 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8513 AssertRC(rc);
8514 return VINF_EM_DBG_STEPPED;
8515}
8516
8517
8518/**
8519 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8520 */
8521HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8522{
8523 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8524 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8525
8526 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8527 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8528 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8529 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8530 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8531
8532 /*
8533 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8534 */
8535 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8536 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8537 {
8538 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8539 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8540 {
8541 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8542 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8543 }
8544 }
8545 else
8546 {
8547 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8548 return rcStrict;
8549 }
8550
8551    /* IOMR0MmioPhysHandler() below may call into IEM; save the necessary state. */
8552 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8553 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8554 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8555 AssertRCReturn(rc, rc);
8556
8557    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8558 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8559 switch (uAccessType)
8560 {
8561#ifndef IN_NEM_DARWIN
8562 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8563 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8564 {
8565 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8566 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8567 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8568
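            /*
             * Reconstruct the physical address of the access from the guest's APIC-base MSR and
             * the page offset given in the Exit qualification, then let IOM's MMIO handler deal
             * with it, passing a #PF-style access flag to distinguish reads from writes.
             */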
8569 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8570 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8571 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8572 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8573 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8574
8575 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8576 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8577 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8578 if ( rcStrict == VINF_SUCCESS
8579 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8580 || rcStrict == VERR_PAGE_NOT_PRESENT)
8581 {
8582 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8583 | HM_CHANGED_GUEST_APIC_TPR);
8584 rcStrict = VINF_SUCCESS;
8585 }
8586 break;
8587 }
8588#else
8589 /** @todo */
8590#endif
8591
8592 default:
8593 {
8594 Log4Func(("uAccessType=%#x\n", uAccessType));
8595 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8596 break;
8597 }
8598 }
8599
8600 if (rcStrict != VINF_SUCCESS)
8601 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8602 return rcStrict;
8603}
8604
8605
8606/**
8607 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8608 * VM-exit.
8609 */
8610HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8611{
8612 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8613 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8614
8615 /*
8616 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8617 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8618 * must emulate the MOV DRx access.
8619 */
8620 if (!pVmxTransient->fIsNestedGuest)
8621 {
8622 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8623 if (pVmxTransient->fWasGuestDebugStateActive)
8624 {
8625 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8626 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8627 }
8628
8629 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8630 && !pVmxTransient->fWasHyperDebugStateActive)
8631 {
8632 Assert(!DBGFIsStepping(pVCpu));
8633 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8634
8635 /* Don't intercept MOV DRx any more. */
8636 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8637 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8638 AssertRC(rc);
8639
8640#ifndef IN_NEM_DARWIN
8641 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8642 VMMRZCallRing3Disable(pVCpu);
8643 HM_DISABLE_PREEMPT(pVCpu);
8644
8645 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8646 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8647 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8648
8649 HM_RESTORE_PREEMPT();
8650 VMMRZCallRing3Enable(pVCpu);
8651#else
8652 CPUMR3NemActivateGuestDebugState(pVCpu);
8653 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8654 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8655#endif
8656
8657#ifdef VBOX_WITH_STATISTICS
8658 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8659 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8660 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8661 else
8662 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8663#endif
8664 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8665 return VINF_SUCCESS;
8666 }
8667 }
8668
8669 /*
8670     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
8671 * The EFER MSR is always up-to-date.
8672 * Update the segment registers and DR7 from the CPU.
8673 */
8674 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8675 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8676 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8677 AssertRCReturn(rc, rc);
8678 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
8679
8680 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8681 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8682 {
8683 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8684 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8685 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8686 if (RT_SUCCESS(rc))
8687 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8688 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8689 }
8690 else
8691 {
8692 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8693 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8694 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8695 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8696 }
8697
8698 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8699 if (RT_SUCCESS(rc))
8700 {
8701 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8702 AssertRCReturn(rc2, rc2);
8703 return VINF_SUCCESS;
8704 }
8705 return rc;
8706}
8707
8708
8709/**
8710 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8711 * Conditional VM-exit.
8712 */
8713HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8714{
8715 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8716
8717#ifndef IN_NEM_DARWIN
8718 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8719
8720 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8721 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8722 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8723 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8724 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8725
8726 /*
8727 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8728 */
8729 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8730 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8731 {
8732 /*
8733 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8734 * instruction emulation to inject the original event. Otherwise, injecting the original event
8735 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8736 */
8737 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8738 { /* likely */ }
8739 else
8740 {
8741 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8742#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8743 /** @todo NSTVMX: Think about how this should be handled. */
8744 if (pVmxTransient->fIsNestedGuest)
8745 return VERR_VMX_IPE_3;
8746#endif
8747 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8748 }
8749 }
8750 else
8751 {
8752 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8753 return rcStrict;
8754 }
8755
8756 /*
8757 * Get sufficient state and update the exit history entry.
8758 */
8759 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8760 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8761 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8762 AssertRCReturn(rc, rc);
8763
8764 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8765 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8766 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8767 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8768 if (!pExitRec)
8769 {
8770 /*
8771 * If we succeed, resume guest execution.
8772 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8773 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8774         * in the host TLB), resume execution, which will cause a guest page fault and let the guest handle this
8775 * weird case. See @bugref{6043}.
8776 */
8777 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8778 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8779/** @todo bird: We can probably just go straight to IOM here and assume that
8780 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8781 * well. However, we need to address that aliasing workarounds that
8782 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8783 *
8784 * Might also be interesting to see if we can get this done more or
8785 * less locklessly inside IOM. Need to consider the lookup table
8786 * updating and use a bit more carefully first (or do all updates via
8787 * rendezvous) */
8788 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8789 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8790 if ( rcStrict == VINF_SUCCESS
8791 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8792 || rcStrict == VERR_PAGE_NOT_PRESENT)
8793 {
8794 /* Successfully handled MMIO operation. */
8795 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8796 | HM_CHANGED_GUEST_APIC_TPR);
8797 rcStrict = VINF_SUCCESS;
8798 }
8799 }
8800 else
8801 {
8802 /*
8803 * Frequent exit or something needing probing. Call EMHistoryExec.
8804 */
8805 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8806 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8807
8808 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8809 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8810
8811 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8812 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8813 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8814 }
8815 return rcStrict;
8816#else
8817 AssertFailed();
8818 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8819#endif
8820}
8821
8822
8823/**
8824 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8825 * VM-exit.
8826 */
8827HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8828{
8829 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8830#ifndef IN_NEM_DARWIN
8831 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8832
8833 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8834 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8835 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8836 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8837 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8838 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8839
8840 /*
8841 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8842 */
8843 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8844 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8845 {
8846 /*
8847 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8848 * we shall resolve the nested #PF and re-inject the original event.
8849 */
8850 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8851 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8852 }
8853 else
8854 {
8855 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8856 return rcStrict;
8857 }
8858
8859 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8860 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8861 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8862 AssertRCReturn(rc, rc);
8863
8864 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8865 uint64_t const uExitQual = pVmxTransient->uExitQual;
8866 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8867
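    /*
     * Convert the EPT-violation Exit qualification bits into a #PF-style error code for PGM:
     * instruction fetches map to X86_TRAP_PF_ID, write accesses to X86_TRAP_PF_RW, and any
     * EPT read/write/execute permission being set means the entry was present (X86_TRAP_PF_P).
     */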
8868 RTGCUINT uErrorCode = 0;
8869 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8870 uErrorCode |= X86_TRAP_PF_ID;
8871 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8872 uErrorCode |= X86_TRAP_PF_RW;
8873 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8874 uErrorCode |= X86_TRAP_PF_P;
8875
8876 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8877 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8878
8879 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8880
8881 /*
8882 * Handle the pagefault trap for the nested shadow table.
8883 */
8884 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8885 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8886 TRPMResetTrap(pVCpu);
8887
8888 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8889 if ( rcStrict == VINF_SUCCESS
8890 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8891 || rcStrict == VERR_PAGE_NOT_PRESENT)
8892 {
8893 /* Successfully synced our nested page tables. */
8894 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8896 return VINF_SUCCESS;
8897 }
8898#else
8899 PVM pVM = pVCpu->CTX_SUFF(pVM);
8900 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8901 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8902 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8903 vmxHCImportGuestRip(pVCpu);
8904 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8905
8906 /*
8907 * Ask PGM for information about the given GCPhys. We need to check if we're
8908 * out of sync first.
8909 */
8910 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8911 PGMPHYSNEMPAGEINFO Info;
8912 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8913 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8914 if (RT_SUCCESS(rc))
8915 {
8916 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8917 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8918 {
8919 if (State.fCanResume)
8920 {
8921 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8922 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8923 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8924 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8925 State.fDidSomething ? "" : " no-change"));
8926 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8927 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8928 return VINF_SUCCESS;
8929 }
8930 }
8931
8932 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8933 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8934 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8935 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8936 State.fDidSomething ? "" : " no-change"));
8937 }
8938 else
8939 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8940 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8941 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8942
8943 /*
8944 * Emulate the memory access, either access handler or special memory.
8945 */
8946 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8947 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8948 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8949 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8950 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8951
8952 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8953 AssertRCReturn(rc, rc);
8954
8955 VBOXSTRICTRC rcStrict;
8956 if (!pExitRec)
8957 rcStrict = IEMExecOne(pVCpu);
8958 else
8959 {
8960 /* Frequent access or probing. */
8961 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8962 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8963 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8964 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8965 }
8966
8967 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8968#endif
8969
8970    Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8971 return rcStrict;
8972}
8973
8974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8975
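/*
 * The VMX-instruction VM-exit handlers below (VMCLEAR, VMLAUNCH, VMPTRLD, VMPTRST, VMREAD,
 * VMRESUME, VMWRITE, VMXOFF, VMXON, INVVPID, INVEPT) all follow the same basic pattern:
 * read the VM-exit instruction length/info and Exit qualification, import just enough guest
 * state for IEM to decode and execute the instruction, decode any memory operand, and then
 * defer to the corresponding IEMExecDecodedXxx API.  Roughly (sketch only, not actual code;
 * the real handlers differ in which state they import and which HM_CHANGED flags they set):
 *
 *     vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *     ...import the guest state needed for decoding...
 *     VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
 *     VBOXSTRICTRC  rcStrict = IEMExecDecodedXxx(pVCpu, &ExitInfo);
 *     if (rcStrict == VINF_IEM_RAISED_XCPT) { ...flag raised-exception state...; rcStrict = VINF_SUCCESS; }
 */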
8976/**
8977 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8978 */
8979HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8980{
8981 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8982
8983 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8984 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8985 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8986 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8987 | CPUMCTX_EXTRN_HWVIRT
8988 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8989 AssertRCReturn(rc, rc);
8990
8991 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8992
8993 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
8994 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8995
8996 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8997 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8998 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8999 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9000 {
9001 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9002 rcStrict = VINF_SUCCESS;
9003 }
9004 return rcStrict;
9005}
9006
9007
9008/**
9009 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9010 */
9011HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9012{
9013 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9014
9015 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9016 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9017 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9018 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9019 AssertRCReturn(rc, rc);
9020
9021 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9022
9023 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9024 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9025 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9026 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9027 {
9028 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9029 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9030 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9031 }
9032 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9033 return rcStrict;
9034}
9035
9036
9037/**
9038 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9039 */
9040HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9041{
9042 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9043
9044 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9045 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9046 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9047 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9048 | CPUMCTX_EXTRN_HWVIRT
9049 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9050 AssertRCReturn(rc, rc);
9051
9052 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9053
9054 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9055 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9056
9057 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9059 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9060 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9061 {
9062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9063 rcStrict = VINF_SUCCESS;
9064 }
9065 return rcStrict;
9066}
9067
9068
9069/**
9070 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9071 */
9072HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9073{
9074 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9075
9076 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9077 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9078 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9079 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9080 | CPUMCTX_EXTRN_HWVIRT
9081 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9082 AssertRCReturn(rc, rc);
9083
9084 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9085
9086 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9087 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9088
9089 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9090 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9091 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9092 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9093 {
9094 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9095 rcStrict = VINF_SUCCESS;
9096 }
9097 return rcStrict;
9098}
9099
9100
9101/**
9102 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9103 */
9104HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9105{
9106 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9107
9108 /*
9109 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9110     * thus might not need to import the shadow VMCS state, but it's safer just in case
9111 * code elsewhere dares look at unsynced VMCS fields.
9112 */
9113 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9114 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9115 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9116 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9117 | CPUMCTX_EXTRN_HWVIRT
9118 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9119 AssertRCReturn(rc, rc);
9120
9121 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9122
9123 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9124 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9125 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9126
9127 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9128 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9129 {
9130 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9131
9132# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9133 /* Try for exit optimization. This is on the following instruction
9134 because it would be a waste of time to have to reinterpret the
9135        already decoded vmread instruction. */
9136 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9137 if (pExitRec)
9138 {
9139 /* Frequent access or probing. */
9140 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9141 AssertRCReturn(rc, rc);
9142
9143 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9144 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9145 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9146 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9147 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9148 }
9149# endif
9150 }
9151 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9152 {
9153 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9154 rcStrict = VINF_SUCCESS;
9155 }
9156 return rcStrict;
9157}
9158
9159
9160/**
9161 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9162 */
9163HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9164{
9165 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9166
9167 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9168 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9169 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9170 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9171 AssertRCReturn(rc, rc);
9172
9173 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9174
9175 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9176 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9177 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9178 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9179 {
9180 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9181 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9182 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9183 }
9184 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9185 return rcStrict;
9186}
9187
9188
9189/**
9190 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9191 */
9192HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9193{
9194 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9195
9196 /*
9197 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9198 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9199 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9200 */
9201 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9202 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9203 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9204 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9205 | CPUMCTX_EXTRN_HWVIRT
9206 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9207 AssertRCReturn(rc, rc);
9208
9209 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9210
9211 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9212 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9213 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9214
9215 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9216 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9217 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9218 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9219 {
9220 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9221 rcStrict = VINF_SUCCESS;
9222 }
9223 return rcStrict;
9224}
9225
9226
9227/**
9228 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9229 */
9230HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9231{
9232 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9233
9234 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9235 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9236 | CPUMCTX_EXTRN_HWVIRT
9237 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9238 AssertRCReturn(rc, rc);
9239
9240 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9241
9242 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9243 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9244 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9245 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9246 {
9247 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9248 rcStrict = VINF_SUCCESS;
9249 }
9250 return rcStrict;
9251}
9252
9253
9254/**
9255 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9256 */
9257HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9258{
9259 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9260
9261 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9262 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9263 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9264 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9265 | CPUMCTX_EXTRN_HWVIRT
9266 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9267 AssertRCReturn(rc, rc);
9268
9269 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9270
9271 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9272 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9273
9274 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9275 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9276 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9277 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9278 {
9279 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9280 rcStrict = VINF_SUCCESS;
9281 }
9282 return rcStrict;
9283}
9284
9285
9286/**
9287 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9288 */
9289HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9290{
9291 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9292
9293 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9294 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9295 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9296 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9297 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9298 AssertRCReturn(rc, rc);
9299
9300 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9301
9302 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9303 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9304
9305 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9306 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9307 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9308 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9309 {
9310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9311 rcStrict = VINF_SUCCESS;
9312 }
9313 return rcStrict;
9314}
9315
9316
9317# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9318/**
9319 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9320 */
9321HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9322{
9323 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9324
9325 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9326 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9327 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9328 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9329 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9330 AssertRCReturn(rc, rc);
9331
9332 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9333
9334 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9335 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9336
9337 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9338 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9339 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9340 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9341 {
9342 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9343 rcStrict = VINF_SUCCESS;
9344 }
9345 return rcStrict;
9346}
9347# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9348#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9349/** @} */
9350
9351
9352#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9353/** @name Nested-guest VM-exit handlers.
9354 * @{
9355 */
9356/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9357/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9358/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9359
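/*
 * Common shape of the nested-guest VM-exit handlers below: check whether the nested hypervisor
 * has configured an intercept for the exit (via the VMCS controls it set up), and if so reflect
 * the VM-exit to it using IEMExecVmxVmexit*; otherwise handle the exit on behalf of the
 * nested-guest with the regular handler.  A rough sketch (not actual code) of the typical
 * conditional handler:
 *
 *     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_XXX_EXIT))
 *     {
 *         vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
 *     }
 *     return vmxHCExitXxx(pVCpu, pVmxTransient);
 */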
9360/**
9361 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9362 * Conditional VM-exit.
9363 */
9364HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9365{
9366 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9367
9368 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9369
9370 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9371 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9372 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9373
9374 switch (uExitIntType)
9375 {
9376# ifndef IN_NEM_DARWIN
9377 /*
9378 * Physical NMIs:
9379         * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9380 */
9381 case VMX_EXIT_INT_INFO_TYPE_NMI:
9382 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9383# endif
9384
9385 /*
9386 * Hardware exceptions,
9387 * Software exceptions,
9388 * Privileged software exceptions:
9389 * Figure out if the exception must be delivered to the guest or the nested-guest.
9390 */
9391 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9392 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9393 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9394 {
9395 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9396 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9397 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9398 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9399
9400 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9401 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9402 pVmxTransient->uExitIntErrorCode);
9403 if (fIntercept)
9404 {
9405 /* Exit qualification is required for debug and page-fault exceptions. */
9406 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9407
9408 /*
9409 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9410 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9411 * length. However, if delivery of a software interrupt, software exception or privileged
9412 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9413 */
9414 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9415
9416 VMXVEXITEVENTINFO ExitEventInfo;
9417 RT_ZERO(ExitEventInfo);
9418 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9419 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9420 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9421 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9422
9423#ifdef DEBUG_ramshankar
9424 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9425 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9426 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9427 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9428 {
9429 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9430 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9431 }
9432#endif
9433 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9434 }
9435
9436 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9437 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9438 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9439 }
9440
9441 /*
9442 * Software interrupts:
9443 * VM-exits cannot be caused by software interrupts.
9444 *
9445 * External interrupts:
9446 * This should only happen when "acknowledge external interrupts on VM-exit"
9447 * control is set. However, we never set this when executing a guest or
9448 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9449 * the guest.
9450 */
9451 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9452 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9453 default:
9454 {
9455 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9456 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9457 }
9458 }
9459}
9460
9461
9462/**
9463 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9464 * Unconditional VM-exit.
9465 */
9466HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9467{
9468 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9469 return IEMExecVmxVmexitTripleFault(pVCpu);
9470}
9471
9472
9473/**
9474 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9475 */
9476HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9477{
9478 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9479
9480 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9481 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9482 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9483}
9484
9485
9486/**
9487 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9488 */
9489HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9490{
9491 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9492
9493 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9494 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9495 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9496}
9497
9498
9499/**
9500 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9501 * Unconditional VM-exit.
9502 */
9503HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9504{
9505 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9506
9507 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9508 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9509 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9510 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9511
9512 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9513
9514 VMXVEXITEVENTINFO ExitEventInfo;
9515 RT_ZERO(ExitEventInfo);
9516 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9517 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9518 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9519}
9520
9521
9522/**
9523 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9524 */
9525HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9526{
9527 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9528
9529 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9530 {
9531 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9532 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9533 }
9534 return vmxHCExitHlt(pVCpu, pVmxTransient);
9535}
9536
9537
9538/**
9539 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9540 */
9541HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9542{
9543 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9544
9545 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9546 {
9547 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9548 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9549
9550 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9551 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9552 }
9553 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9554}
9555
9556
9557/**
9558 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9559 */
9560HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9561{
9562 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9563
9564 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9565 {
9566 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9567 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9568 }
9569 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9570}
9571
9572
9573/**
9574 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9575 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9576 */
9577HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9578{
9579 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9580
9581 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9582 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9583
9584 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9585
9586 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9587 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9588 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9589
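    /*
     * Outside long mode only the lower 32 bits of the register operand specify the VMCS field;
     * mask off the upper half before checking whether the nested hypervisor intercepts it.
     */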
9590 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9591 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9592 u64VmcsField &= UINT64_C(0xffffffff);
9593
9594 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9595 {
9596 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9597 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9598
9599 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9600 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9601 }
9602
9603 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9604 return vmxHCExitVmread(pVCpu, pVmxTransient);
9605 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9606}
9607
9608
9609/**
9610 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9611 */
9612HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9613{
9614 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9615
9616 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9617 {
9618 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9619 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9620 }
9621
9622 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9623}
9624
9625
9626/**
9627 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9628 * Conditional VM-exit.
9629 */
9630HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9631{
9632 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9633
9634 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9635 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9636
9637 VBOXSTRICTRC rcStrict;
9638 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9639 switch (uAccessType)
9640 {
9641 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9642 {
9643 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9644 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9645 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9646 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9647
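            /*
             * Determine whether the nested hypervisor intercepts this MOV-to-CRx write:
             * CR0/CR4 depend on its guest/host masks and read shadows, CR3 on its CR3-load
             * intercept configuration, and CR8 on the CR8-load exiting control.  Writes to
             * any other control register are never intercepted here.
             */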
9648 bool fIntercept;
9649 switch (iCrReg)
9650 {
9651 case 0:
9652 case 4:
9653 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9654 break;
9655
9656 case 3:
9657 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9658 break;
9659
9660 case 8:
9661 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9662 break;
9663
9664 default:
9665 fIntercept = false;
9666 break;
9667 }
9668 if (fIntercept)
9669 {
9670 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9671 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9672 }
9673 else
9674 {
9675 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9676 AssertRCReturn(rc, rc);
9677 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9678 }
9679 break;
9680 }
9681
9682 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9683 {
9684 /*
9685 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9686 * CR2 reads do not cause a VM-exit.
9687 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9688 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9689 */
9690 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9691 if ( iCrReg == 3
9692 || iCrReg == 8)
9693 {
9694 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9695 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9696 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9697 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9698 {
9699 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9700 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9701 }
9702 else
9703 {
9704 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9705 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9706 }
9707 }
9708 else
9709 {
9710 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9711 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9712 }
9713 break;
9714 }
9715
9716 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9717 {
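            /*
             * CLTS only clears CR0.TS, so a nested VM-exit is warranted only when the nested
             * hypervisor owns CR0.TS (the bit is set in its CR0 guest/host mask) and its CR0
             * read shadow also has TS set; otherwise let the regular CLTS handler deal with it.
             */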
9718 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9719 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9720 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
9721 if ( (uGstHostMask & X86_CR0_TS)
9722 && (uReadShadow & X86_CR0_TS))
9723 {
9724 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9725 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9726 }
9727 else
9728 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9729 break;
9730 }
9731
9732 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9733 {
9734 RTGCPTR GCPtrEffDst;
9735 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9736 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9737 if (fMemOperand)
9738 {
9739 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9740 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9741 }
9742 else
9743 GCPtrEffDst = NIL_RTGCPTR;
9744
9745 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9746 {
9747 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9748 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9749 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9750 }
9751 else
9752 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9753 break;
9754 }
9755
9756 default:
9757 {
9758 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9759 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9760 }
9761 }
9762
9763 if (rcStrict == VINF_IEM_RAISED_XCPT)
9764 {
9765 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9766 rcStrict = VINF_SUCCESS;
9767 }
9768 return rcStrict;
9769}
9770
9771
9772/**
9773 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9774 * Conditional VM-exit.
9775 */
9776HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9777{
9778 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9779
9780 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9781 {
9782 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9783 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9784
9785 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9786 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9787 }
9788 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9789}
9790
9791
9792/**
9793 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9794 * Conditional VM-exit.
9795 */
9796HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9797{
9798 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9799
9800 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9801
9802 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9803 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9804 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9805
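    /*
     * The I/O size field in the Exit qualification encodes 0 for byte, 1 for word and 3 for
     * dword accesses; the value 2 is invalid, hence the assertion above and the zero entry
     * in the size table below.
     */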
9806 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9807 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9808 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9809 {
9810 /*
9811 * IN/OUT instruction:
9812 * - Provides VM-exit instruction length.
9813 *
9814 * INS/OUTS instruction:
9815 * - Provides VM-exit instruction length.
9816 * - Provides Guest-linear address.
9817 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9818 */
9819 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9820 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9821
9822        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9823 pVmxTransient->ExitInstrInfo.u = 0;
9824 pVmxTransient->uGuestLinearAddr = 0;
9825
9826 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9827 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9828 if (fIOString)
9829 {
9830 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9831 if (fVmxInsOutsInfo)
9832 {
9833 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9834 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9835 }
9836 }
9837
9838 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
9839 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9840 }
9841 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9842}
9843
9844
9845/**
9846 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9847 */
9848HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9849{
9850 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9851
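    /*
     * If the nested hypervisor uses MSR bitmaps, look up the permission for the MSR in ECX;
     * otherwise every RDMSR unconditionally causes a VM-exit to the nested hypervisor.
     */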
9852 uint32_t fMsrpm;
9853 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9854 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9855 else
9856 fMsrpm = VMXMSRPM_EXIT_RD;
9857
9858 if (fMsrpm & VMXMSRPM_EXIT_RD)
9859 {
9860 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9861 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9862 }
9863 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9864}
9865
9866
9867/**
9868 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9869 */
9870HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9871{
9872 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9873
9874 uint32_t fMsrpm;
9875 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9876 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9877 else
9878 fMsrpm = VMXMSRPM_EXIT_WR;
9879
9880 if (fMsrpm & VMXMSRPM_EXIT_WR)
9881 {
9882 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9883 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9884 }
9885 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9886}
9887
9888
9889/**
9890 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9891 */
9892HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9893{
9894 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9895
9896 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9897 {
9898 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9899 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9900 }
9901 return vmxHCExitMwait(pVCpu, pVmxTransient);
9902}
9903
9904
9905/**
9906 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9907 * VM-exit.
9908 */
9909HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9910{
9911 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9912
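    /*
     * MTF VM-exits are always reflected to the nested hypervisor here; they are trap-like,
     * so the pending debug exceptions are fetched and passed along with the exit.
     */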
9913 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9914 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9915 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
9916 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9917}
9918
9919
9920/**
9921 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9922 */
9923HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9924{
9925 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9926
9927 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9928 {
9929 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9930 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9931 }
9932 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9933}
9934
9935
9936/**
9937 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9938 */
9939HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9940{
9941 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9942
9943 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9944 * PAUSE when executing a nested-guest? If it does not, we would not need
9945 * to check for the intercepts here. Just call VM-exit... */
9946
9947 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9948 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9949 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9950 {
9951 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9952 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9953 }
9954 return vmxHCExitPause(pVCpu, pVmxTransient);
9955}
9956
9957
9958/**
9959 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9960 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9961 */
9962HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9963{
9964 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9965
9966 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9967 {
9968 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9969 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
9970 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9971 }
9972 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9973}
9974
9975
9976/**
9977 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9978 * VM-exit.
9979 */
9980HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9981{
9982 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9983
9984 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9985 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9986 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9987 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9988
9989 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9990
9991 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9992 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9993
9994 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9995
9996 VMXVEXITEVENTINFO ExitEventInfo;
9997 RT_ZERO(ExitEventInfo);
9998 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9999 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10000 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10001}
10002
10003
10004/**
10005 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10006 * Conditional VM-exit.
10007 */
10008HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10009{
10010 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10011
10012 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10013 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10014 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10015}
10016
10017
10018/**
10019 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10020 * Conditional VM-exit.
10021 */
10022HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10023{
10024 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10025
10026 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10027 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10028 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10029}
10030
10031
10032/**
10033 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10034 */
10035HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10036{
10037 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10038
10039 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10040 {
10041 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10042 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10043 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10044 }
10045 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10046}
10047
10048
10049/**
10050 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10051 */
10052HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10053{
10054 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10055
10056 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10057 {
10058 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10059 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10060 }
10061 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10062}
10063
10064
10065/**
10066 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10067 */
10068HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10069{
10070 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10071
10072 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10073 {
10074 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10075 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10076 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10077 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10078
10079 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10080 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10081 }
10082 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10083}
10084
10085
10086/**
10087 * Nested-guest VM-exit handler for invalid-guest state
10088 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10089 */
10090HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10091{
10092 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10093
10094 /*
10095 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10096     * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10097     * Handle it as if the outer guest itself were in an invalid guest state.
10098 *
10099 * When the fast path is implemented, this should be changed to cause the corresponding
10100 * nested-guest VM-exit.
10101 */
10102 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10103}
10104
10105
10106/**
10107 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10108 * and only provide the instruction length.
10109 *
10110 * Unconditional VM-exit.
10111 */
10112HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10113{
10114 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10115
10116#ifdef VBOX_STRICT
10117 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10118 switch (pVmxTransient->uExitReason)
10119 {
10120 case VMX_EXIT_ENCLS:
10121 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10122 break;
10123
10124 case VMX_EXIT_VMFUNC:
10125 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10126 break;
10127 }
10128#endif
10129
10130 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10131 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10132}
10133
10134
10135/**
10136 * Nested-guest VM-exit handler for instructions that provide the instruction length
10137 * as well as additional information.
10138 *
10139 * Unconditional VM-exit.
10140 */
10141HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10142{
10143 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10144
10145# ifdef VBOX_STRICT
10146 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10147 switch (pVmxTransient->uExitReason)
10148 {
10149 case VMX_EXIT_GDTR_IDTR_ACCESS:
10150 case VMX_EXIT_LDTR_TR_ACCESS:
10151 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10152 break;
10153
10154 case VMX_EXIT_RDRAND:
10155 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10156 break;
10157
10158 case VMX_EXIT_RDSEED:
10159 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10160 break;
10161
10162 case VMX_EXIT_XSAVES:
10163 case VMX_EXIT_XRSTORS:
10164 /** @todo NSTVMX: Verify XSS-bitmap. */
10165 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10166 break;
10167
10168 case VMX_EXIT_UMWAIT:
10169 case VMX_EXIT_TPAUSE:
10170 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10171 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10172 break;
10173
10174 case VMX_EXIT_LOADIWKEY:
10175 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10176 break;
10177 }
10178# endif
10179
10180 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10181 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10182 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10183
10184 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10185 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10186}
10187
10188# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10189
10190/**
10191 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10192 * Conditional VM-exit.
10193 */
10194HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10195{
10196 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10197 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10198
10199//# define DSL_IRQ_FIX_1
10200# define DSL_IRQ_FIX_2
10201
10202 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10203 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10204 {
10205 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10206 AssertRCReturn(rc, rc);
10207
10208 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10209 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10210# ifdef DSL_IRQ_FIX_2
10211 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
10212 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10213 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10214 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10215 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10216
10217 /*
10218         * If it's our VMEXIT, we're responsible for re-injecting any event whose delivery
10219         * might have triggered this VMEXIT.  If we forward the problem to the inner VMM,
10220         * it becomes the inner VMM's problem to deal with and we'll clear the recovered event.
10221 */
10222 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10223 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10224 { /*likely*/ }
10225 else
10226 {
10227 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10228 return rcStrict;
10229 }
10230 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10231# else
10232 VBOXSTRICTRC rcStrict;
10233# endif
10234
10235 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10236 uint64_t const uExitQual = pVmxTransient->uExitQual;
10237
10238 RTGCPTR GCPtrNestedFault;
10239 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10240 if (fIsLinearAddrValid)
10241 {
10242 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10243 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10244 }
10245 else
10246 GCPtrNestedFault = 0;
10247
10248 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10249 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10250 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10251 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10252 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10253
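        /* Worked example of the mapping above (illustration only): an EPT violation caused
           by a write (VMX_EXIT_QUAL_EPT_ACCESS_WRITE) to a page whose EPT entry only permits
           reads (VMX_EXIT_QUAL_EPT_ENTRY_READ set, ENTRY_WRITE clear) yields
           uErr = X86_TRAP_PF_RW | X86_TRAP_PF_P. */
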
10254 PGMPTWALK Walk;
10255 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10256 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10257 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10258 &Walk);
10259 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10260 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10261 if (RT_SUCCESS(rcStrict))
10262 {
10263# ifdef DSL_IRQ_FIX_1
10264 /*
10264             * If it's our VMEXIT, we're responsible for re-injecting any event whose delivery
10266             * might have triggered this VMEXIT.  If we forward the problem to the inner VMM,
10267             * it's the inner VMM's problem to deal with.  This means it's troublesome to
10268             * call vmxHCCheckExitDueToEventDelivery before PGMR0NestedTrap0eHandlerNestedPaging
10269             * has decided whose VMEXIT it is.  Unfortunately, we're in a bit of a pickle then if
10270             * we end up with an informational status here, as we _must_ _not_ drop events either.
10271 */
10272             /** @todo need a better solution for this; it should probably be
10273              * applied to other exits too... */
10274 if (rcStrict == VINF_SUCCESS)
10275 {
10276 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
10277 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10278 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10279 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10280 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10281
10282 vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10283 }
10284# endif
10285 return rcStrict;
10286 }
10287
10288 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10289 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10290 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10291# ifdef DSL_IRQ_FIX_2
10292 if (fClearEventOnForward)
10293 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10294# endif
10295
10296 VMXVEXITEVENTINFO ExitEventInfo;
10297 RT_ZERO(ExitEventInfo);
10298 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10299 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10300
10301 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10302 {
10303 VMXVEXITINFO const ExitInfo
10304 = VMXVEXITINFO_INIT_WITH_QUALIFIER_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10305 pVmxTransient->uExitQual,
10306 pVmxTransient->cbExitInstr,
10307 pVmxTransient->uGuestLinearAddr,
10308 pVmxTransient->uGuestPhysicalAddr);
10309 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10310 }
10311
10312 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10313 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10314 }
10315
10316 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10317}
10318
10319
10320/**
10321 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10322 * Conditional VM-exit.
10323 */
10324HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10325{
10326 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10327 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10328
10329 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10330 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10331 {
10332 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_ALL);
10333 AssertRCReturn(rc, rc);
10334
10335 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10336
10337 PGMPTWALK Walk;
10338 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10339 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10340 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10341 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10342 0 /* GCPtrNestedFault */, &Walk);
10343 if (RT_SUCCESS(rcStrict))
10344 {
10345 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10346 return rcStrict;
10347 }
10348
10349 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10350 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10351 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10352
10353 VMXVEXITEVENTINFO ExitEventInfo;
10354 RT_ZERO(ExitEventInfo);
10355 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10356 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10357
10358 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10359 }
10360
10361 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10362}
10363
10364# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10365
10366/** @} */
10367#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10368
10369
10370/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10371 * probes.
10372 *
10373 * The following few functions and the associated structure contain the bloat
10374 * necessary for providing detailed debug events and dtrace probes as well as
10375 * reliable host-side single stepping.  This works on the principle of
10376 * "subclassing" the normal execution loop and workers. We replace the loop
10377 * method completely and override selected helpers to add necessary adjustments
10378 * to their core operation.
10379 *
10380 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10381 * any performance for debug and analysis features.
10382 *
10383 * @{
10384 */
10385
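/*
 * Illustrative sketch (not compiled, simplified) of how the helpers below are
 * meant to be strung together by the debug run loop.  The real loop driver
 * elsewhere in this file additionally handles guest-state import/export,
 * event injection, stepping detection against uRipStart/uCsStart and the
 * normal exit dispatching; this only shows the VMXRUNDBGSTATE lifecycle:
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *
 *     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *     for (;;)
 *     {
 *         // Commit the requested control / exception-bitmap changes to the VMCS.
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);
 *
 *         // ... run the guest and establish pVmxTransient->uExitReason ...
 *
 *         // For exits flagged in DbgState.bmExitsToCheck, raise DBGF/dtrace events.
 *         rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, pVmxTransient->uExitReason);
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *     }
 *     return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */
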
10386/**
10387 * Transient per-VCPU debug state of the VMCS and related info that we
10388 * save/restore in the debug run loop.
10389 */
10390typedef struct VMXRUNDBGSTATE
10391{
10392 /** The RIP we started executing at. This is for detecting that we stepped. */
10393 uint64_t uRipStart;
10394 /** The CS we started executing with. */
10395 uint16_t uCsStart;
10396
10397 /** Whether we've actually modified the 1st execution control field. */
10398 bool fModifiedProcCtls : 1;
10399 /** Whether we've actually modified the 2nd execution control field. */
10400 bool fModifiedProcCtls2 : 1;
10401 /** Whether we've actually modified the exception bitmap. */
10402 bool fModifiedXcptBitmap : 1;
10403
10404     /** We desire the CR0 mask to be cleared. */
10405 bool fClearCr0Mask : 1;
10406     /** We desire the CR4 mask to be cleared. */
10407 bool fClearCr4Mask : 1;
10408 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10409 uint32_t fCpe1Extra;
10410 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10411 uint32_t fCpe1Unwanted;
10412 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10413 uint32_t fCpe2Extra;
10414 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10415 uint32_t bmXcptExtra;
10416 /** The sequence number of the Dtrace provider settings the state was
10417 * configured against. */
10418 uint32_t uDtraceSettingsSeqNo;
10419 /** VM-exits to check (one bit per VM-exit). */
10420 uint32_t bmExitsToCheck[3];
10421
10422 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10423 uint32_t fProcCtlsInitial;
10424 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10425 uint32_t fProcCtls2Initial;
10426 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10427 uint32_t bmXcptInitial;
10428} VMXRUNDBGSTATE;
10429AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10430typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10431
10432
10433/**
10434 * Initializes the VMXRUNDBGSTATE structure.
10435 *
10436 * @param pVCpu The cross context virtual CPU structure of the
10437 * calling EMT.
10438 * @param pVmxTransient The VMX-transient structure.
10439 * @param pDbgState The debug state to initialize.
10440 */
10441static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10442{
10443 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10444 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10445
10446 pDbgState->fModifiedProcCtls = false;
10447 pDbgState->fModifiedProcCtls2 = false;
10448 pDbgState->fModifiedXcptBitmap = false;
10449 pDbgState->fClearCr0Mask = false;
10450 pDbgState->fClearCr4Mask = false;
10451 pDbgState->fCpe1Extra = 0;
10452 pDbgState->fCpe1Unwanted = 0;
10453 pDbgState->fCpe2Extra = 0;
10454 pDbgState->bmXcptExtra = 0;
10455 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10456 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10457 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10458}
10459
10460
10461/**
10462 * Updates the VMCS fields with changes requested by @a pDbgState.
10463 *
10464 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
10465 * immediately before executing guest code, i.e. when interrupts are disabled.
10466 * We don't check status codes here as we cannot easily assert or return in the
10467 * latter case.
10468 *
10469 * @param pVCpu The cross context virtual CPU structure.
10470 * @param pVmxTransient The VMX-transient structure.
10471 * @param pDbgState The debug state.
10472 */
10473static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10474{
10475 /*
10476 * Ensure desired flags in VMCS control fields are set.
10477 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10478 *
10479 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10480 * there should be no stale data in pCtx at this point.
10481 */
10482 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10483 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10484 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10485 {
10486 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10487 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10488 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10489 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10490 pDbgState->fModifiedProcCtls = true;
10491 }
10492
10493 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10494 {
10495 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10496 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10497 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10498 pDbgState->fModifiedProcCtls2 = true;
10499 }
10500
10501 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10502 {
10503 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10504 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10505 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10506 pDbgState->fModifiedXcptBitmap = true;
10507 }
10508
10509 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10510 {
10511 pVmcsInfo->u64Cr0Mask = 0;
10512 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10513 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10514 }
10515
10516 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10517 {
10518 pVmcsInfo->u64Cr4Mask = 0;
10519 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10520 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10521 }
10522
10523 NOREF(pVCpu);
10524}
10525
10526
10527/**
10528 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
10529 * re-entry next time around.
10530 *
10531 * @returns Strict VBox status code (i.e. informational status codes too).
10532 * @param pVCpu The cross context virtual CPU structure.
10533 * @param pVmxTransient The VMX-transient structure.
10534 * @param pDbgState The debug state.
10535 * @param rcStrict The return code from executing the guest using single
10536 * stepping.
10537 */
10538static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10539 VBOXSTRICTRC rcStrict)
10540{
10541 /*
10542 * Restore VM-exit control settings as we may not reenter this function the
10543 * next time around.
10544 */
10545 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10546
10547     /* We reload the initial value and trigger what recalculations we can the
10548        next time around.  From the looks of things, that's all that's required atm. */
10549 if (pDbgState->fModifiedProcCtls)
10550 {
10551 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10552 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10553 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10554 AssertRC(rc2);
10555 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10556 }
10557
10558 /* We're currently the only ones messing with this one, so just restore the
10559 cached value and reload the field. */
10560 if ( pDbgState->fModifiedProcCtls2
10561 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10562 {
10563 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10564 AssertRC(rc2);
10565 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10566 }
10567
10568 /* If we've modified the exception bitmap, we restore it and trigger
10569 reloading and partial recalculation the next time around. */
10570 if (pDbgState->fModifiedXcptBitmap)
10571 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10572
10573 return rcStrict;
10574}
10575
10576
10577/**
10578 * Configures VM-exit controls for current DBGF and DTrace settings.
10579 *
10580 * This updates @a pDbgState and the VMCS execution control fields to reflect
10581 * the necessary VM-exits demanded by DBGF and DTrace.
10582 *
10583 * @param pVCpu The cross context virtual CPU structure.
10584 * @param pVmxTransient The VMX-transient structure. May update
10585 * fUpdatedTscOffsettingAndPreemptTimer.
10586 * @param pDbgState The debug state.
10587 */
10588static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10589{
10590#ifndef IN_NEM_DARWIN
10591 /*
10592 * Take down the dtrace serial number so we can spot changes.
10593 */
10594 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10595 ASMCompilerBarrier();
10596#endif
10597
10598 /*
10599 * We'll rebuild most of the middle block of data members (holding the
10600 * current settings) as we go along here, so start by clearing it all.
10601 */
10602 pDbgState->bmXcptExtra = 0;
10603 pDbgState->fCpe1Extra = 0;
10604 pDbgState->fCpe1Unwanted = 0;
10605 pDbgState->fCpe2Extra = 0;
10606 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10607 pDbgState->bmExitsToCheck[i] = 0;
10608
10609 /*
10610 * Software interrupts (INT XXh) - no idea how to trigger these...
10611 */
10612 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10613 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10614 || VBOXVMM_INT_SOFTWARE_ENABLED())
10615 {
10616 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10617 }
10618
10619 /*
10620 * INT3 breakpoints - triggered by #BP exceptions.
10621 */
10622 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10623 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10624
10625 /*
10626 * Exception bitmap and XCPT events+probes.
10627 */
10628 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10629 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10630 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10631
10632 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10633 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10634 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10635 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10636 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10637 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10638 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10639 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10640 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10641 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10642 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10643 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10644 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10645 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10646 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10647 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10648 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10649 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10650
10651 if (pDbgState->bmXcptExtra)
10652 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10653
10654 /*
10655 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10656 *
10657 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
10658 * So, when adding/changing/removing please don't forget to update it.
10659 *
10660     * Some of the macros pick up local variables to save horizontal space
10661     * (being able to see it all in a table is the lesser evil here).
10662 */
10663#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10664 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10665 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10666#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10667 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10668 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10669 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10670 } else do { } while (0)
10671#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10672 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10673 { \
10674 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10675 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10676 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10677 } else do { } while (0)
10678#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10679 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10680 { \
10681 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10682 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10683 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10684 } else do { } while (0)
10685#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10686 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10687 { \
10688 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10689 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10690 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10691 } else do { } while (0)
10692
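    /* For reference, a row like SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
       in the table below roughly expands (AssertCompile elided) to:
           if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
               || VBOXVMM_INSTR_HALT_ENABLED())
           {
               pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
               ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
           }
       i.e. it both forces the corresponding intercept on and marks the VM-exit for checking. */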
10693 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10694 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10695 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10696 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10697 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10698
10699 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10700 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10701 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10702 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10703 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10704 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10705 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10706 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10707 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10708 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10709 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10710 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10711 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10712 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10713 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10714 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10715 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10716 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10717 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10718 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10719 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10720 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10721 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10722 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10723 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10724 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10725 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10726 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10727 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10728 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10729 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10731 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10732 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10733 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10734 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10735
10736 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10737 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10738 {
10739 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10740 | CPUMCTX_EXTRN_APIC_TPR);
10741 AssertRC(rc);
10742
10743#if 0 /** @todo fix me */
10744 pDbgState->fClearCr0Mask = true;
10745 pDbgState->fClearCr4Mask = true;
10746#endif
10747 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10748 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10749 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10750 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10751 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10752 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10753 require clearing here and in the loop if we start using it. */
10754 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10755 }
10756 else
10757 {
10758 if (pDbgState->fClearCr0Mask)
10759 {
10760 pDbgState->fClearCr0Mask = false;
10761 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10762 }
10763 if (pDbgState->fClearCr4Mask)
10764 {
10765 pDbgState->fClearCr4Mask = false;
10766 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10767 }
10768 }
10769 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10770 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10771
10772 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10773 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10774 {
10775 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10776 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10777 }
10778 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10779 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10780
10781 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10782 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10783 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10784 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10785 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10786 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10787 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10788 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10789#if 0 /** @todo too slow, fix handler. */
10790 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10791#endif
10792 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10793
10794 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10795 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10796 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10797 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10798 {
10799 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10800 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10801 }
10802 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10803 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10805 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10806
10807 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10808 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10809 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10810 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10811 {
10812 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10813 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10814 }
10815 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10816 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10817 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10818 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10819
10820 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10821 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10822 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10823 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10824 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10825 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10826 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10827 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10828 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10829 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10830 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10831 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10832 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10833 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10834 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10835 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10836 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10837 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10838 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10839 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
10840 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10841 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10842
10843#undef IS_EITHER_ENABLED
10844#undef SET_ONLY_XBM_IF_EITHER_EN
10845#undef SET_CPE1_XBM_IF_EITHER_EN
10846#undef SET_CPEU_XBM_IF_EITHER_EN
10847#undef SET_CPE2_XBM_IF_EITHER_EN
10848
10849 /*
10850 * Sanitize the control stuff.
10851 */
10852 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10853 if (pDbgState->fCpe2Extra)
10854 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10855 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10856 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10857#ifndef IN_NEM_DARWIN
10858 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10859 {
10860 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10861 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10862 }
10863#else
10864 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10865 {
10866 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10867 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10868 }
10869#endif
10870
10871 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10872 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10873 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10874 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10875}
10876
10877
10878/**
10879 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10880 * appropriate.
10881 *
10882 * The caller has checked the VM-exit against the
10883 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
10884 * so we don't have to do either of those here.
10885 *
10886 * @returns Strict VBox status code (i.e. informational status codes too).
10887 * @param pVCpu The cross context virtual CPU structure.
10888 * @param pVmxTransient The VMX-transient structure.
10889 * @param uExitReason The VM-exit reason.
10890 *
10891 * @remarks The name of this function is displayed by dtrace, so keep it short
10892 *          and to the point.  No longer than 33 chars, please.
10893 */
10894static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10895{
10896 /*
10897 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10898 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10899 *
10900 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
10901 * does. Must add/change/remove both places. Same ordering, please.
10902 *
10903 * Added/removed events must also be reflected in the next section
10904 * where we dispatch dtrace events.
10905 */
10906 bool fDtrace1 = false;
10907 bool fDtrace2 = false;
10908 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10909 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10910 uint32_t uEventArg = 0;
10911#define SET_EXIT(a_EventSubName) \
10912 do { \
10913 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10914 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10915 } while (0)
10916#define SET_BOTH(a_EventSubName) \
10917 do { \
10918 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10919 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10920 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10921 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10922 } while (0)
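    /* For reference, SET_BOTH(CPUID) in the switch below simply expands to:
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       while SET_EXIT(TASK_SWITCH) only sets the enmEvent2/fDtrace2 pair. */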
10923 switch (uExitReason)
10924 {
10925 case VMX_EXIT_MTF:
10926 return vmxHCExitMtf(pVCpu, pVmxTransient);
10927
10928 case VMX_EXIT_XCPT_OR_NMI:
10929 {
10930 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10931 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10932 {
10933 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10934 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10935 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10936 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10937 {
10938 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10939 {
10940 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10941 uEventArg = pVmxTransient->uExitIntErrorCode;
10942 }
10943 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10944 switch (enmEvent1)
10945 {
10946 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10947 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10948 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10949 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10950 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10951 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10952 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10953 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10954 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10955 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10956 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10957 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10958 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10959 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10960 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10961 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10962 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10963 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10964 default: break;
10965 }
10966 }
10967 else
10968 AssertFailed();
10969 break;
10970
10971 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10972 uEventArg = idxVector;
10973 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10974 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10975 break;
10976 }
10977 break;
10978 }
10979
10980 case VMX_EXIT_TRIPLE_FAULT:
10981 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10982 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10983 break;
10984 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10985 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10986 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10987 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10988 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10989
10990 /* Instruction specific VM-exits: */
10991 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10992 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10993 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10994 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
10995 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
10996 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
10997 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
10998 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
10999 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11000 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11001 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11002 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11003 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11004 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11005 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11006 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11007 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11008 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11009 case VMX_EXIT_MOV_CRX:
11010 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11011 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11012 SET_BOTH(CRX_READ);
11013 else
11014 SET_BOTH(CRX_WRITE);
11015 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11016 break;
11017 case VMX_EXIT_MOV_DRX:
11018 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11019 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11020 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11021 SET_BOTH(DRX_READ);
11022 else
11023 SET_BOTH(DRX_WRITE);
11024 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11025 break;
11026 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11027 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11028 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11029 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11030 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11031 case VMX_EXIT_GDTR_IDTR_ACCESS:
11032 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11033 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11034 {
11035 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11036 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11037 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11038 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11039 }
11040 break;
11041
11042 case VMX_EXIT_LDTR_TR_ACCESS:
11043 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11044 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11045 {
11046 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11047 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11048 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11049 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11050 }
11051 break;
11052
11053 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11054 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11055 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11056 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11057 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11058 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11059 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11060 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11061 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11062 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11063 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11064
11065 /* Events that aren't relevant at this point. */
11066 case VMX_EXIT_EXT_INT:
11067 case VMX_EXIT_INT_WINDOW:
11068 case VMX_EXIT_NMI_WINDOW:
11069 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11070 case VMX_EXIT_PREEMPT_TIMER:
11071 case VMX_EXIT_IO_INSTR:
11072 break;
11073
11074 /* Errors and unexpected events. */
11075 case VMX_EXIT_INIT_SIGNAL:
11076 case VMX_EXIT_SIPI:
11077 case VMX_EXIT_IO_SMI:
11078 case VMX_EXIT_SMI:
11079 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11080 case VMX_EXIT_ERR_MSR_LOAD:
11081 case VMX_EXIT_ERR_MACHINE_CHECK:
11082 case VMX_EXIT_PML_FULL:
11083 case VMX_EXIT_VIRTUALIZED_EOI:
11084 break;
11085
11086 default:
11087 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11088 break;
11089 }
11090#undef SET_BOTH
11091#undef SET_EXIT
11092
11093 /*
11094     * Dtrace tracepoints go first.  We do them here at once so we don't
11095     * have to duplicate the guest-state saving and related code a few dozen times.
11096     * The downside is that we've got to repeat the switch, though this time
11097     * we use enmEvent since the probes are a subset of what DBGF does.
11098 */
11099 if (fDtrace1 || fDtrace2)
11100 {
11101 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11102 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11103 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11104 switch (enmEvent1)
11105 {
11106 /** @todo consider which extra parameters would be helpful for each probe. */
11107 case DBGFEVENT_END: break;
11108 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11109 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11110 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11111 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11112 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11113 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11114 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11115 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11116 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11117 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11118 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11119 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11120 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11121 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11122 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11123 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11124 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11125 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11126 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11127 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11128 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11129 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11130 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11131 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11132 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11133 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11134 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11135 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11136 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11137 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11138 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11139 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11140 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11141 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11142 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11143 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11144 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11145 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11146 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11147 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11148 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11149 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11150 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11151 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11152 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11153 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11154 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11155 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11156 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11157 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11158 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11159 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11160 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11161 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11162 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11163 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11164 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11165 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11166 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11167 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11168 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11169 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11170 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11171 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11172 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11173 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11174 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11175 }
11176 switch (enmEvent2)
11177 {
11178 /** @todo consider which extra parameters would be helpful for each probe. */
11179 case DBGFEVENT_END: break;
11180 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11181 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11182 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11183 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11184 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11185 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11186 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11187 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11188 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11189 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11190 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11191 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11192 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11193 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11194 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11195 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11196 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11197 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11198 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11199 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11200 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11201 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11202 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11203 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11204 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11205 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11206 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11207 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11208 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11209 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11210 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11211 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11212 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11213 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11214 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11215 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11216 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11217 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11218 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11219 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11220 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11221 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11222 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11223 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11224 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11225 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11226 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11227 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11228 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11229 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11230 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11231 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11232 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11233 }
11234 }
11235
11236 /*
11237 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11238 * the DBGF call will do a full check).
11239 *
11240 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11241 * Note! If we have two events, we prioritize the first, i.e. the instruction
11242 * one, in order to avoid event nesting.
11243 */
11244 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11245 if ( enmEvent1 != DBGFEVENT_END
11246 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11247 {
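/* Only CS and RIP are imported before raising the event; presumably DBGF just
   needs the guest code location at this point, and any further state is
   imported on demand later. */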
11248 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11249 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11250 if (rcStrict != VINF_SUCCESS)
11251 return rcStrict;
11252 }
11253 else if ( enmEvent2 != DBGFEVENT_END
11254 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11255 {
11256 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11257 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11258 if (rcStrict != VINF_SUCCESS)
11259 return rcStrict;
11260 }
11261
11262 return VINF_SUCCESS;
11263}
11264
11265
11266/**
11267 * Single-stepping VM-exit filtering.
11268 *
11269 * This preprocesses the VM-exits and decides whether we've gotten far
11270 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11271 * handling is performed.
11272 *
11273 * @returns Strict VBox status code (i.e. informational status codes too).
11274 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11275 * @param pVmxTransient The VMX-transient structure.
11276 * @param pDbgState The debug state.
11277 */
11278DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11279{
11280 /*
11281 * Expensive (saves context) generic dtrace VM-exit probe.
11282 */
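/* The probe takes the complete guest context, hence the full
   HMVMX_CPUMCTX_EXTRN_ALL import below; that import is what makes this probe
   expensive. */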
11283 uint32_t const uExitReason = pVmxTransient->uExitReason;
11284 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11285 { /* more likely */ }
11286 else
11287 {
11288 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11289 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11290 AssertRC(rc);
11291 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11292 }
11293
11294#ifndef IN_NEM_DARWIN
11295 /*
11296 * Check for host NMI, just to get that out of the way.
11297 */
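/* A host NMI is not a guest event; hmR0VmxExitHostNmi deals with it on the
   host side. This path is compiled out for the NEM/darwin backend. */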
11298 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11299 { /* normally likely */ }
11300 else
11301 {
11302 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11303 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11304 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11305 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11306 }
11307#endif
11308
11309 /*
11310 * Check for single stepping event if we're stepping.
11311 */
11312 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11313 {
11314 switch (uExitReason)
11315 {
11316 case VMX_EXIT_MTF:
11317 return vmxHCExitMtf(pVCpu, pVmxTransient);
11318
11319 /* Various events: */
11320 case VMX_EXIT_XCPT_OR_NMI:
11321 case VMX_EXIT_EXT_INT:
11322 case VMX_EXIT_TRIPLE_FAULT:
11323 case VMX_EXIT_INT_WINDOW:
11324 case VMX_EXIT_NMI_WINDOW:
11325 case VMX_EXIT_TASK_SWITCH:
11326 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11327 case VMX_EXIT_APIC_ACCESS:
11328 case VMX_EXIT_EPT_VIOLATION:
11329 case VMX_EXIT_EPT_MISCONFIG:
11330 case VMX_EXIT_PREEMPT_TIMER:
11331
11332 /* Instruction specific VM-exits: */
11333 case VMX_EXIT_CPUID:
11334 case VMX_EXIT_GETSEC:
11335 case VMX_EXIT_HLT:
11336 case VMX_EXIT_INVD:
11337 case VMX_EXIT_INVLPG:
11338 case VMX_EXIT_RDPMC:
11339 case VMX_EXIT_RDTSC:
11340 case VMX_EXIT_RSM:
11341 case VMX_EXIT_VMCALL:
11342 case VMX_EXIT_VMCLEAR:
11343 case VMX_EXIT_VMLAUNCH:
11344 case VMX_EXIT_VMPTRLD:
11345 case VMX_EXIT_VMPTRST:
11346 case VMX_EXIT_VMREAD:
11347 case VMX_EXIT_VMRESUME:
11348 case VMX_EXIT_VMWRITE:
11349 case VMX_EXIT_VMXOFF:
11350 case VMX_EXIT_VMXON:
11351 case VMX_EXIT_MOV_CRX:
11352 case VMX_EXIT_MOV_DRX:
11353 case VMX_EXIT_IO_INSTR:
11354 case VMX_EXIT_RDMSR:
11355 case VMX_EXIT_WRMSR:
11356 case VMX_EXIT_MWAIT:
11357 case VMX_EXIT_MONITOR:
11358 case VMX_EXIT_PAUSE:
11359 case VMX_EXIT_GDTR_IDTR_ACCESS:
11360 case VMX_EXIT_LDTR_TR_ACCESS:
11361 case VMX_EXIT_INVEPT:
11362 case VMX_EXIT_RDTSCP:
11363 case VMX_EXIT_INVVPID:
11364 case VMX_EXIT_WBINVD:
11365 case VMX_EXIT_XSETBV:
11366 case VMX_EXIT_RDRAND:
11367 case VMX_EXIT_INVPCID:
11368 case VMX_EXIT_VMFUNC:
11369 case VMX_EXIT_RDSEED:
11370 case VMX_EXIT_XSAVES:
11371 case VMX_EXIT_XRSTORS:
11372 {
11373 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11374 AssertRCReturn(rc, rc);
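/* If RIP or CS has changed since stepping started, at least one instruction
   has completed, so report the step to the debugger. */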
11375 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11376 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11377 return VINF_EM_DBG_STEPPED;
11378 break;
11379 }
11380
11381 /* Errors and unexpected events: */
11382 case VMX_EXIT_INIT_SIGNAL:
11383 case VMX_EXIT_SIPI:
11384 case VMX_EXIT_IO_SMI:
11385 case VMX_EXIT_SMI:
11386 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11387 case VMX_EXIT_ERR_MSR_LOAD:
11388 case VMX_EXIT_ERR_MACHINE_CHECK:
11389 case VMX_EXIT_PML_FULL:
11390 case VMX_EXIT_VIRTUALIZED_EOI:
11391 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11392 break;
11393
11394 default:
11395 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11396 break;
11397 }
11398 }
11399
11400 /*
11401 * Check for debugger event breakpoints and dtrace probes.
11402 */
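/* bmExitsToCheck is a bitmap indexed by VM-exit reason; the bound below
   (RT_ELEMENTS(...) * 32) reflects its 32-bit word granularity. */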
11403 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11404 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11405 {
11406 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11407 if (rcStrict != VINF_SUCCESS)
11408 return rcStrict;
11409 }
11410
11411 /*
11412 * Normal processing.
11413 */
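/* With HMVMX_USE_FUNCTION_TABLE the exit reason indexes g_aVMExitHandlers
   directly; otherwise vmxHCHandleExit performs the dispatch. */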
11414#ifdef HMVMX_USE_FUNCTION_TABLE
11415 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11416#else
11417 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11418#endif
11419}
11420
11421/** @} */