1/* $Id: VMXAllTemplate.cpp.h 104516 2024-05-04 01:53:42Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
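/*
 * Usage sketch (illustrative only, built from macros/functions defined in this file): a
 * VM-exit handler condenses its VMREADs via vmxHCReadToTransient() and can then, in
 * strict builds, assert that the fields it is about to consume were actually read:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *     // pVmxTransient->uExitQual and pVmxTransient->cbExitInstr are now safe to use.
 */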
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also MSRs like EFER
70 * which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Guest-CPU state required for split-lock \#AC handling VM-exits.
92 */
93#define HMVMX_CPUMCTX_XPCT_AC ( CPUMCTX_EXTRN_CR0 \
94 | CPUMCTX_EXTRN_RFLAGS \
95 | CPUMCTX_EXTRN_SS \
96 | CPUMCTX_EXTRN_CS)
97
98/**
99 * Exception bitmap mask for real-mode guests (real-on-v86).
100 *
101 * We need to intercept all exceptions manually, except:
102 * - \#AC and \#DB, which are excluded here because they are always intercepted
103 * to prevent the CPU from deadlocking due to bugs in Intel CPUs.
104 * - \#PF, which need not be intercepted even in real-mode if we have nested
105 * paging support.
106 */
107#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
108 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
109 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
110 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
111 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
112 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
113 | RT_BIT(X86_XCPT_XF))
114
115/** Maximum VM-instruction error number. */
116#define HMVMX_INSTR_ERROR_MAX 28
117
118/** Profiling macro. */
119#ifdef HM_PROFILE_EXIT_DISPATCH
120# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
121# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
122#else
123# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
124# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
125#endif
126
127#ifndef IN_NEM_DARWIN
128/** Assert that preemption is disabled or covered by thread-context hooks. */
129# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
130 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
131
132/** Assert that we haven't migrated CPUs when thread-context hooks are not
133 * used. */
134# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
135 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
136 ("Illegal migration! Entered on CPU %u Current %u\n", \
137 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
138#else
139# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
140# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
141#endif
142
143/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
144 * context. */
145#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
146 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
147 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
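/*
 * Usage sketch (illustrative; the field subset is just an example): fExtrn bits that are
 * set mark guest state still living in the VMCS, so a handler imports what it needs first
 * and the assertion then holds:
 *
 *     int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_XPCT_AC);
 *     AssertRCReturn(rc, rc);
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_XPCT_AC);
 */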
148
149/** Log the VM-exit reason with an easily visible marker to identify it in a
150 * potential sea of logging data. */
151#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
152 do { \
153 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
154 HMGetVmxExitName(a_uExitReason))); \
155 } while (0) \
156
157
158/*********************************************************************************************************************************
159* Structures and Typedefs *
160*********************************************************************************************************************************/
161/**
162 * Memory operand read or write access.
163 */
164typedef enum VMXMEMACCESS
165{
166 VMXMEMACCESS_READ = 0,
167 VMXMEMACCESS_WRITE = 1
168} VMXMEMACCESS;
169
170
171/**
172 * VMX VM-exit handler.
173 *
174 * @returns Strict VBox status code (i.e. informational status codes too).
175 * @param pVCpu The cross context virtual CPU structure.
176 * @param pVmxTransient The VMX-transient structure.
177 */
178#ifndef HMVMX_USE_FUNCTION_TABLE
179typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
180#else
181typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
182/** Pointer to VM-exit handler. */
183typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
184#endif
185
186/**
187 * VMX VM-exit handler, non-strict status code.
188 *
189 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
190 *
191 * @returns VBox status code, no informational status code returned.
192 * @param pVCpu The cross context virtual CPU structure.
193 * @param pVmxTransient The VMX-transient structure.
194 *
195 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
196 * use of that status code will be replaced with VINF_EM_SOMETHING
197 * later when switching over to IEM.
198 */
199#ifndef HMVMX_USE_FUNCTION_TABLE
200typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
201#else
202typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
203#endif
204
205
206/*********************************************************************************************************************************
207* Internal Functions *
208*********************************************************************************************************************************/
209#ifndef HMVMX_USE_FUNCTION_TABLE
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
212# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
213#else
214# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
215# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
216#endif
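/*
 * Definition sketch (body elided): exit handlers in this file are defined with
 * HMVMX_EXIT_DECL / HMVMX_EXIT_NSRC_DECL so their linkage and calling convention match
 * whichever dispatch mode (function table or inlined switch) is compiled in:
 *
 *     HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         ... handle the HLT exit and return a VBOXSTRICTRC ...
 *     }
 */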
217#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
218DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
219#endif
220
221static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
222
223/** @name VM-exit handler prototypes.
224 * @{
225 */
226static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
227static FNVMXEXITHANDLER vmxHCExitExtInt;
228static FNVMXEXITHANDLER vmxHCExitTripleFault;
229static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
230static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
231static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
232static FNVMXEXITHANDLER vmxHCExitCpuid;
233static FNVMXEXITHANDLER vmxHCExitGetsec;
234static FNVMXEXITHANDLER vmxHCExitHlt;
235static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
236static FNVMXEXITHANDLER vmxHCExitInvlpg;
237static FNVMXEXITHANDLER vmxHCExitRdpmc;
238static FNVMXEXITHANDLER vmxHCExitVmcall;
239#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
240static FNVMXEXITHANDLER vmxHCExitVmclear;
241static FNVMXEXITHANDLER vmxHCExitVmlaunch;
242static FNVMXEXITHANDLER vmxHCExitVmptrld;
243static FNVMXEXITHANDLER vmxHCExitVmptrst;
244static FNVMXEXITHANDLER vmxHCExitVmread;
245static FNVMXEXITHANDLER vmxHCExitVmresume;
246static FNVMXEXITHANDLER vmxHCExitVmwrite;
247static FNVMXEXITHANDLER vmxHCExitVmxoff;
248static FNVMXEXITHANDLER vmxHCExitVmxon;
249static FNVMXEXITHANDLER vmxHCExitInvvpid;
250# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
251static FNVMXEXITHANDLER vmxHCExitInvept;
252# endif
253#endif
254static FNVMXEXITHANDLER vmxHCExitRdtsc;
255static FNVMXEXITHANDLER vmxHCExitMovCRx;
256static FNVMXEXITHANDLER vmxHCExitMovDRx;
257static FNVMXEXITHANDLER vmxHCExitIoInstr;
258static FNVMXEXITHANDLER vmxHCExitRdmsr;
259static FNVMXEXITHANDLER vmxHCExitWrmsr;
260static FNVMXEXITHANDLER vmxHCExitMwait;
261static FNVMXEXITHANDLER vmxHCExitMtf;
262static FNVMXEXITHANDLER vmxHCExitMonitor;
263static FNVMXEXITHANDLER vmxHCExitPause;
264static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
265static FNVMXEXITHANDLER vmxHCExitApicAccess;
266static FNVMXEXITHANDLER vmxHCExitEptViolation;
267static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
268static FNVMXEXITHANDLER vmxHCExitRdtscp;
269static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
270static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
271static FNVMXEXITHANDLER vmxHCExitXsetbv;
272static FNVMXEXITHANDLER vmxHCExitInvpcid;
273#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
274static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
275#endif
276static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
277static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
278/** @} */
279
280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
281/** @name Nested-guest VM-exit handler prototypes.
282 * @{
283 */
284static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
285static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
287static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
288static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
289static FNVMXEXITHANDLER vmxHCExitHltNested;
290static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
291static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
292static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
293static FNVMXEXITHANDLER vmxHCExitRdtscNested;
294static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
295static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
296static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
297static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
298static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
299static FNVMXEXITHANDLER vmxHCExitMwaitNested;
300static FNVMXEXITHANDLER vmxHCExitMtfNested;
301static FNVMXEXITHANDLER vmxHCExitMonitorNested;
302static FNVMXEXITHANDLER vmxHCExitPauseNested;
303static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
304static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
305static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
306static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
307static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
308static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
309static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
310static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
311static FNVMXEXITHANDLER vmxHCExitInstrNested;
312static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
313# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
314static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
315static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
316# endif
317/** @} */
318#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
319
320
321/*********************************************************************************************************************************
322* Global Variables *
323*********************************************************************************************************************************/
324#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
325/**
326 * Array of all VMCS fields.
327 * Any fields added to the VT-x spec. should be added here.
328 *
329 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
330 * of nested-guests.
331 */
332static const uint32_t g_aVmcsFields[] =
333{
334 /* 16-bit control fields. */
335 VMX_VMCS16_VPID,
336 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
337 VMX_VMCS16_EPTP_INDEX,
338 VMX_VMCS16_HLAT_PREFIX_SIZE,
339 VMX_VMCS16_LAST_PID_PTR_INDEX,
340
341 /* 16-bit guest-state fields. */
342 VMX_VMCS16_GUEST_ES_SEL,
343 VMX_VMCS16_GUEST_CS_SEL,
344 VMX_VMCS16_GUEST_SS_SEL,
345 VMX_VMCS16_GUEST_DS_SEL,
346 VMX_VMCS16_GUEST_FS_SEL,
347 VMX_VMCS16_GUEST_GS_SEL,
348 VMX_VMCS16_GUEST_LDTR_SEL,
349 VMX_VMCS16_GUEST_TR_SEL,
350 VMX_VMCS16_GUEST_INTR_STATUS,
351 VMX_VMCS16_GUEST_PML_INDEX,
352 VMX_VMCS16_GUEST_UINV,
353
354 /* 16-bit host-state fields. */
355 VMX_VMCS16_HOST_ES_SEL,
356 VMX_VMCS16_HOST_CS_SEL,
357 VMX_VMCS16_HOST_SS_SEL,
358 VMX_VMCS16_HOST_DS_SEL,
359 VMX_VMCS16_HOST_FS_SEL,
360 VMX_VMCS16_HOST_GS_SEL,
361 VMX_VMCS16_HOST_TR_SEL,
362
363 /* 64-bit control fields. */
364 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
365 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
366 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
367 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
368 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
369 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
370 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
371 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
372 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
373 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
374 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
375 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
376 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
377 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
378 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
379 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
380 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
381 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
382 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
383 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
384 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
385 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
386 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
387 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
388 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
389 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
390 VMX_VMCS64_CTRL_EPTP_FULL,
391 VMX_VMCS64_CTRL_EPTP_HIGH,
392 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
393 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
394 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
395 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
396 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
397 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
398 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
399 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
400 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
401 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
402 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
403 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
404 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
405 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
406 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
407 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
408 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_SPPTP_FULL,
413 VMX_VMCS64_CTRL_SPPTP_HIGH,
414 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
415 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
416 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
417 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
418 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
419 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
420 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
421 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
422 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
423 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
424 VMX_VMCS64_CTRL_EXIT2_FULL,
425 VMX_VMCS64_CTRL_EXIT2_HIGH,
426 VMX_VMCS64_CTRL_SPEC_CTRL_MASK_FULL,
427 VMX_VMCS64_CTRL_SPEC_CTRL_MASK_HIGH,
428 VMX_VMCS64_CTRL_SPEC_CTRL_SHADOW_FULL,
429 VMX_VMCS64_CTRL_SPEC_CTRL_SHADOW_HIGH,
430
431 /* 64-bit read-only data fields. */
432 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
433 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
434
435 /* 64-bit guest-state fields. */
436 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
437 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
438 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
439 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
440 VMX_VMCS64_GUEST_PAT_FULL,
441 VMX_VMCS64_GUEST_PAT_HIGH,
442 VMX_VMCS64_GUEST_EFER_FULL,
443 VMX_VMCS64_GUEST_EFER_HIGH,
444 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
445 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
446 VMX_VMCS64_GUEST_PDPTE0_FULL,
447 VMX_VMCS64_GUEST_PDPTE0_HIGH,
448 VMX_VMCS64_GUEST_PDPTE1_FULL,
449 VMX_VMCS64_GUEST_PDPTE1_HIGH,
450 VMX_VMCS64_GUEST_PDPTE2_FULL,
451 VMX_VMCS64_GUEST_PDPTE2_HIGH,
452 VMX_VMCS64_GUEST_PDPTE3_FULL,
453 VMX_VMCS64_GUEST_PDPTE3_HIGH,
454 VMX_VMCS64_GUEST_BNDCFGS_FULL,
455 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
456 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
457 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
458 VMX_VMCS64_GUEST_PKRS_FULL,
459 VMX_VMCS64_GUEST_PKRS_HIGH,
460
461 /* 64-bit host-state fields. */
462 VMX_VMCS64_HOST_PAT_FULL,
463 VMX_VMCS64_HOST_PAT_HIGH,
464 VMX_VMCS64_HOST_EFER_FULL,
465 VMX_VMCS64_HOST_EFER_HIGH,
466 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
467 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
468 VMX_VMCS64_HOST_PKRS_FULL,
469 VMX_VMCS64_HOST_PKRS_HIGH,
470
471 /* 32-bit control fields. */
472 VMX_VMCS32_CTRL_PIN_EXEC,
473 VMX_VMCS32_CTRL_PROC_EXEC,
474 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
475 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
476 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
477 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
478 VMX_VMCS32_CTRL_EXIT,
479 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
480 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
481 VMX_VMCS32_CTRL_ENTRY,
482 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
483 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
484 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
485 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
486 VMX_VMCS32_CTRL_TPR_THRESHOLD,
487 VMX_VMCS32_CTRL_PROC_EXEC2,
488 VMX_VMCS32_CTRL_PLE_GAP,
489 VMX_VMCS32_CTRL_PLE_WINDOW,
490 VMX_VMCS32_CTRL_INSTR_TIMEOUT,
491
492 /* 32-bit read-only data fields. */
493 VMX_VMCS32_RO_VM_INSTR_ERROR,
494 VMX_VMCS32_RO_EXIT_REASON,
495 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
496 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
497 VMX_VMCS32_RO_IDT_VECTORING_INFO,
498 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
499 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
500 VMX_VMCS32_RO_EXIT_INSTR_INFO,
501
502 /* 32-bit guest-state fields. */
503 VMX_VMCS32_GUEST_ES_LIMIT,
504 VMX_VMCS32_GUEST_CS_LIMIT,
505 VMX_VMCS32_GUEST_SS_LIMIT,
506 VMX_VMCS32_GUEST_DS_LIMIT,
507 VMX_VMCS32_GUEST_FS_LIMIT,
508 VMX_VMCS32_GUEST_GS_LIMIT,
509 VMX_VMCS32_GUEST_LDTR_LIMIT,
510 VMX_VMCS32_GUEST_TR_LIMIT,
511 VMX_VMCS32_GUEST_GDTR_LIMIT,
512 VMX_VMCS32_GUEST_IDTR_LIMIT,
513 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
514 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
515 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
516 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
517 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
518 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
519 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
520 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
521 VMX_VMCS32_GUEST_INT_STATE,
522 VMX_VMCS32_GUEST_ACTIVITY_STATE,
523 VMX_VMCS32_GUEST_SMBASE,
524 VMX_VMCS32_GUEST_SYSENTER_CS,
525 VMX_VMCS32_PREEMPT_TIMER_VALUE,
526
527 /* 32-bit host-state fields. */
528 VMX_VMCS32_HOST_SYSENTER_CS,
529
530 /* Natural-width control fields. */
531 VMX_VMCS_CTRL_CR0_MASK,
532 VMX_VMCS_CTRL_CR4_MASK,
533 VMX_VMCS_CTRL_CR0_READ_SHADOW,
534 VMX_VMCS_CTRL_CR4_READ_SHADOW,
535 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
536 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
537 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
538 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
539
540 /* Natural-width read-only data fields. */
541 VMX_VMCS_RO_EXIT_QUALIFICATION,
542 VMX_VMCS_RO_IO_RCX,
543 VMX_VMCS_RO_IO_RSI,
544 VMX_VMCS_RO_IO_RDI,
545 VMX_VMCS_RO_IO_RIP,
546 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
547
548 /* Natural-width guest-state fields. */
549 VMX_VMCS_GUEST_CR0,
550 VMX_VMCS_GUEST_CR3,
551 VMX_VMCS_GUEST_CR4,
552 VMX_VMCS_GUEST_ES_BASE,
553 VMX_VMCS_GUEST_CS_BASE,
554 VMX_VMCS_GUEST_SS_BASE,
555 VMX_VMCS_GUEST_DS_BASE,
556 VMX_VMCS_GUEST_FS_BASE,
557 VMX_VMCS_GUEST_GS_BASE,
558 VMX_VMCS_GUEST_LDTR_BASE,
559 VMX_VMCS_GUEST_TR_BASE,
560 VMX_VMCS_GUEST_GDTR_BASE,
561 VMX_VMCS_GUEST_IDTR_BASE,
562 VMX_VMCS_GUEST_DR7,
563 VMX_VMCS_GUEST_RSP,
564 VMX_VMCS_GUEST_RIP,
565 VMX_VMCS_GUEST_RFLAGS,
566 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
567 VMX_VMCS_GUEST_SYSENTER_ESP,
568 VMX_VMCS_GUEST_SYSENTER_EIP,
569 VMX_VMCS_GUEST_S_CET,
570 VMX_VMCS_GUEST_SSP,
571 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
572
573 /* Natural-width host-state fields. */
574 VMX_VMCS_HOST_CR0,
575 VMX_VMCS_HOST_CR3,
576 VMX_VMCS_HOST_CR4,
577 VMX_VMCS_HOST_FS_BASE,
578 VMX_VMCS_HOST_GS_BASE,
579 VMX_VMCS_HOST_TR_BASE,
580 VMX_VMCS_HOST_GDTR_BASE,
581 VMX_VMCS_HOST_IDTR_BASE,
582 VMX_VMCS_HOST_SYSENTER_ESP,
583 VMX_VMCS_HOST_SYSENTER_EIP,
584 VMX_VMCS_HOST_RSP,
585 VMX_VMCS_HOST_RIP,
586 VMX_VMCS_HOST_S_CET,
587 VMX_VMCS_HOST_SSP,
588 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
589};
590#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
591
592#ifdef HMVMX_USE_FUNCTION_TABLE
593/**
594 * VMX_EXIT dispatch table.
595 */
596static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
597{
598 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
599 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
600 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
601 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
602 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
603 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
604 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
605 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
606 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
607 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
608 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
609 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
610 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
611 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
612 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
613 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
614 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
615 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
616 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
617#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
618 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
619 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
620 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
621 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
622 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
623 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
624 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
625 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
626 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
627#else
628 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
629 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
630 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
631 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
632 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
633 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
634 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
635 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
636 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
637#endif
638 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
639 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
640 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
641 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
642 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
643 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
644 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
645 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
646 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
647 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
648 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
649 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
650 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
651 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
652 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
653 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
654 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
655 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
656 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
657 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
658 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
659 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
660#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
661 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
662#else
663 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
664#endif
665 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
666 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
667#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
668 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
669#else
670 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
671#endif
672 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
673 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
674 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
675 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
676 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
677 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
678 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
679 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
680 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
681 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
682 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
683 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
684 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
685 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
686 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
687 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
688};
689#endif /* HMVMX_USE_FUNCTION_TABLE */
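/*
 * Dispatch sketch (illustrative; assumes pVmxTransient->uExitReason holds the basic exit
 * reason): the table is indexed directly by the exit reason, with out-of-range values
 * routed to the unexpected-exit handler:
 *
 *     VBOXSTRICTRC rcStrict;
 *     if (RT_LIKELY(pVmxTransient->uExitReason <= VMX_EXIT_MAX))
 *         rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 *     else
 *         rcStrict = vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 */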
690
691#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
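/** Descriptions of the VMX VM-instruction errors, indexed by the VM-instruction error number. */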
692static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
693{
694 /* 0 */ "(Not Used)",
695 /* 1 */ "VMCALL executed in VMX root operation.",
696 /* 2 */ "VMCLEAR with invalid physical address.",
697 /* 3 */ "VMCLEAR with VMXON pointer.",
698 /* 4 */ "VMLAUNCH with non-clear VMCS.",
699 /* 5 */ "VMRESUME with non-launched VMCS.",
700 /* 6 */ "VMRESUME after VMXOFF.",
701 /* 7 */ "VM-entry with invalid control fields.",
702 /* 8 */ "VM-entry with invalid host state fields.",
703 /* 9 */ "VMPTRLD with invalid physical address.",
704 /* 10 */ "VMPTRLD with VMXON pointer.",
705 /* 11 */ "VMPTRLD with incorrect revision identifier.",
706 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
707 /* 13 */ "VMWRITE to read-only VMCS component.",
708 /* 14 */ "(Not Used)",
709 /* 15 */ "VMXON executed in VMX root operation.",
710 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
711 /* 17 */ "VM-entry with non-launched executive VMCS.",
712 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
713 /* 19 */ "VMCALL with non-clear VMCS.",
714 /* 20 */ "VMCALL with invalid VM-exit control fields.",
715 /* 21 */ "(Not Used)",
716 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
717 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
718 /* 24 */ "VMCALL with invalid SMM-monitor features.",
719 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
720 /* 26 */ "VM-entry with events blocked by MOV SS.",
721 /* 27 */ "(Not Used)",
722 /* 28 */ "Invalid operand to INVEPT/INVVPID."
723};
724#endif /* VBOX_STRICT && LOG_ENABLED */
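/*
 * Usage sketch (strict, logging-enabled builds only, since the table is compiled out
 * otherwise): after a failed VMLAUNCH/VMRESUME the VM-instruction error can be read and
 * translated for diagnostics:
 *
 *     uint32_t uInstrError;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
 *         Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 */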
725
726
727/**
728 * Gets the CR0 guest/host mask.
729 *
730 * These bits typically do not change through the lifetime of a VM. Any bit set in
731 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
732 * by the guest.
733 *
734 * @returns The CR0 guest/host mask.
735 * @param pVCpu The cross context virtual CPU structure.
736 */
737static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
738{
739 /*
740 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
741 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
742 *
743 * Furthermore, modifications to any bits that are reserved/unspecified currently
744 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
745 * when future CPUs specify and use currently reserved/unspecified bits.
746 */
747 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
748 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
749 * and @bugref{6944}. */
750 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
751 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
752 return ( X86_CR0_PE
753 | X86_CR0_NE
754 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
755 | X86_CR0_PG
756 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
757}
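/*
 * Usage sketch (assumes the VMX_VMCS_WRITE_NW wrapper supplied by the includer): the
 * fixed mask would be committed to the CR0 guest/host mask field when exporting guest
 * CR0 state, along these lines:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */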
758
759
760/**
761 * Gets the CR4 guest/host mask.
762 *
763 * These bits typically do not change through the lifetime of a VM. Any bit set in
764 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
765 * by the guest.
766 *
767 * @returns The CR4 guest/host mask.
768 * @param pVCpu The cross context virtual CPU structure.
769 */
770static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
771{
772 /*
773 * We construct a mask of all CR4 bits that the guest can modify without causing
774 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
775 * a VM-exit when the guest attempts to modify them when executing using
776 * hardware-assisted VMX.
777 *
778 * When a feature is not exposed to the guest (and may be present on the host),
779 * we want to intercept guest modifications to the bit so we can emulate proper
780 * behavior (e.g., #GP).
781 *
782 * Furthermore, only modifications to those bits that don't require immediate
783 * emulation are allowed. E.g., PCIDE is excluded because its behavior
784 * depends on CR3, which might not always be the guest value while executing
785 * using hardware-assisted VMX.
786 */
787 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
788 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
789#ifdef IN_NEM_DARWIN
790 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
791#endif
792 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
793
794 /*
795 * Paranoia.
796 * Ensure features exposed to the guest are present on the host.
797 */
798 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
799#ifdef IN_NEM_DARWIN
800 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
801#endif
802 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
803
804 uint64_t const fGstMask = X86_CR4_PVI
805 | X86_CR4_TSD
806 | X86_CR4_DE
807 | X86_CR4_MCE
808 | X86_CR4_PCE
809 | X86_CR4_OSXMMEEXCPT
810 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
811#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
812 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
813 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
814#endif
815 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
816 return ~fGstMask;
817}
818
819
820/**
821 * Checks whether an \#AC exception generated while executing a guest (or
822 * nested-guest) was due to a split-lock memory access.
823 *
824 * @returns @c true if split-lock triggered the \#AC, @c false otherwise.
825 * @param pVCpu The cross context virtual CPU structure.
826 */
827DECL_FORCE_INLINE(bool) vmxHCIsSplitLockAcXcpt(PVMCPU pVCpu)
828{
829 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
830 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM) /* 1. If 486-style alignment checks aren't enabled, this must be a split-lock #AC. */
831 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) /* 2. When the EFLAGS.AC != 0 this can only be a split-lock case. */
832 || CPUMGetGuestCPL(pVCpu) != 3) /* 3. #AC cannot happen in rings 0-2 except for split-lock detection. */
833 return true;
834 return false;
835}
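/*
 * Usage sketch (illustrative): the \#AC exit path can use this predicate to separate a
 * split-lock \#AC (a host policy decision, e.g. warn or forward to ring-3) from an
 * architectural alignment-check fault that should simply be reflected into the guest:
 *
 *     if (vmxHCIsSplitLockAcXcpt(pVCpu))
 *         // apply the VM's split-lock policy
 *     else
 *         // re-inject #AC into the guest
 */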
836
837
838/**
839 * Adds one or more exceptions to the exception bitmap and commits it to the current
840 * VMCS.
841 *
842 * @param pVCpu The cross context virtual CPU structure.
843 * @param pVmxTransient The VMX-transient structure.
844 * @param uXcptMask The exception(s) to add.
845 */
846static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
847{
848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
849 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
850 if ((uXcptBitmap & uXcptMask) != uXcptMask)
851 {
852 uXcptBitmap |= uXcptMask;
853 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
854 AssertRC(rc);
855 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
856 }
857}
858
859
860/**
861 * Adds an exception to the exception bitmap and commits it to the current VMCS.
862 *
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcpt The exception to add.
866 */
867static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
868{
869 Assert(uXcpt <= X86_XCPT_LAST);
870 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
871}
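/*
 * Usage sketch (illustrative scenario): an intercept is typically added while some bit of
 * emulation needs it and removed again afterwards; removal honours nested-guest
 * requirements via vmxHCRemoveXcptIntercept():
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     ...
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 */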
872
873
874/**
875 * Removes one or more exceptions from the exception bitmap and commits it to the
876 * current VMCS.
877 *
878 * This takes care of not removing the exception intercept if a nested-guest
879 * requires the exception to be intercepted.
880 *
881 * @returns VBox status code.
882 * @param pVCpu The cross context virtual CPU structure.
883 * @param pVmxTransient The VMX-transient structure.
884 * @param uXcptMask The exception(s) to remove.
885 */
886static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
887{
888 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
889 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
890 if (uXcptBitmap & uXcptMask)
891 {
892#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
893 if (!pVmxTransient->fIsNestedGuest)
894 { /* likely */ }
895 else
896 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
897#endif
898#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
899 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
900 | RT_BIT(X86_XCPT_DE)
901 | RT_BIT(X86_XCPT_NM)
902 | RT_BIT(X86_XCPT_TS)
903 | RT_BIT(X86_XCPT_UD)
904 | RT_BIT(X86_XCPT_NP)
905 | RT_BIT(X86_XCPT_SS)
906 | RT_BIT(X86_XCPT_GP)
907 | RT_BIT(X86_XCPT_PF)
908 | RT_BIT(X86_XCPT_MF));
909#elif defined(HMVMX_ALWAYS_TRAP_PF)
910 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
911#endif
912 if (uXcptMask)
913 {
914 /* Validate we are not removing any essential exception intercepts. */
915#ifndef IN_NEM_DARWIN
916 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
917#else
918 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
919#endif
920 NOREF(pVCpu);
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
922 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
923
924 /* Remove it from the exception bitmap. */
925 uXcptBitmap &= ~uXcptMask;
926
927 /* Commit and update the cache if necessary. */
928 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
929 {
930 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
931 AssertRC(rc);
932 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
933 }
934 }
935 }
936 return VINF_SUCCESS;
937}
938
939
940/**
941 * Removes an exception from the exception bitmap and commits it to the current
942 * VMCS.
943 *
944 * @returns VBox status code.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param pVmxTransient The VMX-transient structure.
947 * @param uXcpt The exception to remove.
948 */
949static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
950{
951 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
952}
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
955
956/**
957 * Loads the shadow VMCS specified by the VMCS info. object.
958 *
959 * @returns VBox status code.
960 * @param pVmcsInfo The VMCS info. object.
961 *
962 * @remarks Can be called with interrupts disabled.
963 */
964static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
965{
966 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
967 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
968
969 return VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
970}
971
972
973/**
974 * Clears the shadow VMCS specified by the VMCS info. object.
975 *
976 * @returns VBox status code.
977 * @param pVmcsInfo The VMCS info. object.
978 *
979 * @remarks Can be called with interrupts disabled.
980 */
981static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
982{
983 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
984 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
985
986 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
987 if (RT_SUCCESS(rc))
988 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
989 return rc;
990}
991
992
993/**
994 * Switches from and to the specified VMCSes.
995 *
996 * @returns VBox status code.
997 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
998 * @param pVmcsInfoTo The VMCS info. object we are switching to.
999 *
1000 * @remarks Called with interrupts disabled.
1001 */
1002static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1003{
1004 /*
1005 * Clear the VMCS we are switching out if it has not already been cleared.
1006 * This will sync any CPU internal data back to the VMCS.
1007 */
1008 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1009 {
1010 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1011 if (RT_SUCCESS(rc))
1012 {
1013 /*
1014 * The shadow VMCS, if any, would not be active at this point since we
1015 * would have cleared it while importing the virtual hardware-virtualization
1016 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1017 * clear the shadow VMCS here, just assert for safety.
1018 */
1019 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1020 }
1021 else
1022 return rc;
1023 }
1024
1025 /*
1026 * Clear the VMCS we are switching to if it has not already been cleared.
1027 * This will initialize the VMCS launch state to "clear" required for loading it.
1028 *
1029 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1030 */
1031 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1032 {
1033 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1034 if (RT_SUCCESS(rc))
1035 { /* likely */ }
1036 else
1037 return rc;
1038 }
1039
1040 /*
1041 * Finally, load the VMCS we are switching to.
1042 */
1043 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1044}
1045
1046
1047/**
1048 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1049 * caller.
1050 *
1051 * @returns VBox status code.
1052 * @param pVCpu The cross context virtual CPU structure.
1053 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1054 * true) or guest VMCS (pass false).
1055 */
1056static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1057{
1058 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1059 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1060
1061 PVMXVMCSINFO pVmcsInfoFrom;
1062 PVMXVMCSINFO pVmcsInfoTo;
1063 if (fSwitchToNstGstVmcs)
1064 {
1065 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1066 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1067 Assert(!pVCpu->hm.s.vmx.fMergedNstGstCtls);
1068 }
1069 else
1070 {
1071 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1072 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1073 }
1074
1075 /*
1076 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1077 * preemption hook code path acquires the current VMCS.
1078 */
1079 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1080
1081 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1082 if (RT_SUCCESS(rc))
1083 {
1084 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1085 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1086
1087 /*
1088 * If we are switching to a VMCS that was executed on a different host CPU or was
1089 * never executed before, flag that we need to export the host state before executing
1090 * guest/nested-guest code using hardware-assisted VMX.
1091 *
1092 * This could probably be done in a preemptible context since the preemption hook
1093 * will flag the necessary change in host context. However, since preemption is
1094 * already disabled and to avoid making assumptions about host specific code in
1095 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1096 * disabled.
1097 */
1098 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1099 { /* likely */ }
1100 else
1101 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1102
1103 ASMSetFlags(fEFlags);
1104
1105 /*
1106 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1107 * flag that we need to update the host MSR values there. Even if we decide in the
1108 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1109 * if its content differs, we would have to update the host MSRs anyway.
1110 */
1111 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1112 }
1113 else
1114 ASMSetFlags(fEFlags);
1115 return rc;
1116}
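/*
 * Usage sketch (illustrative): when emulating VMLAUNCH/VMRESUME for the guest hypervisor
 * the caller switches to the nested-guest VMCS, and back again when handling the
 * corresponding VM-exit:
 *
 *     bool const fSwitchToNstGstVmcs = true;
 *     int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, fSwitchToNstGstVmcs);
 *     AssertRCReturn(rc, rc);
 */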
1117
1118#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1119#ifdef VBOX_STRICT
1120
1121/**
1122 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1123 * transient structure.
1124 *
1125 * @param pVCpu The cross context virtual CPU structure.
1126 * @param pVmxTransient The VMX-transient structure.
1127 */
1128DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1129{
1130 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1131 AssertRC(rc);
1132}
1133
1134
1135/**
1136 * Reads the VM-entry exception error code field from the VMCS into
1137 * the VMX transient structure.
1138 *
1139 * @param pVCpu The cross context virtual CPU structure.
1140 * @param pVmxTransient The VMX-transient structure.
1141 */
1142DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1143{
1144 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1145 AssertRC(rc);
1146}
1147
1148
1149/**
1150 * Reads the VM-entry instruction length field from the VMCS into
1151 * the VMX transient structure.
1152 *
1153 * @param pVCpu The cross context virtual CPU structure.
1154 * @param pVmxTransient The VMX-transient structure.
1155 */
1156DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1157{
1158 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1159 AssertRC(rc);
1160}
1161
1162#endif /* VBOX_STRICT */
1163
1164
1165/**
1166 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1167 *
1168 * Don't call directly unless it's likely that some or all of the fields
1169 * given in @a a_fReadMask have already been read.
1170 *
1171 * @tparam a_fReadMask The fields to read.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 * @param pVmxTransient The VMX-transient structure.
1174 */
1175template<uint32_t const a_fReadMask>
1176static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1177{
1178 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1179 | HMVMX_READ_EXIT_INSTR_LEN
1180 | HMVMX_READ_EXIT_INSTR_INFO
1181 | HMVMX_READ_IDT_VECTORING_INFO
1182 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1183 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1184 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1185 | HMVMX_READ_GUEST_LINEAR_ADDR
1186 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1187 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1188 )) == 0);
1189
1190 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1191 {
1192 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1193
1194 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1195 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1196 {
1197 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1198 AssertRC(rc);
1199 }
1200 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1201 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1202 {
1203 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1204 AssertRC(rc);
1205 }
1206 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1207 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1208 {
1209 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1210 AssertRC(rc);
1211 }
1212 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1213 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1214 {
1215 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1216 AssertRC(rc);
1217 }
1218 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1219 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1220 {
1221 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1222 AssertRC(rc);
1223 }
1224 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1225 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1226 {
1227 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1228 AssertRC(rc);
1229 }
1230 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1231 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1232 {
1233 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1234 AssertRC(rc);
1235 }
1236 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1237 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1238 {
1239 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1240 AssertRC(rc);
1241 }
1242 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1243 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1244 {
1245 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1246 AssertRC(rc);
1247 }
1248 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1249 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1250 {
1251 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1252 AssertRC(rc);
1253 }
1254
1255 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1256 }
1257}
1258
1259
1260/**
1261 * Reads VMCS fields into the VMXTRANSIENT structure.
1262 *
1263 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1264 * generating an optimized read sequence w/o any conditionals in between (in
1265 * non-strict builds).
1266 *
1267 * @tparam a_fReadMask The fields to read. One or more of the
1268 * HMVMX_READ_XXX fields ORed together.
1269 * @param pVCpu The cross context virtual CPU structure.
1270 * @param pVmxTransient The VMX-transient structure.
1271 */
1272template<uint32_t const a_fReadMask>
1273DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1274{
1275 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1276 | HMVMX_READ_EXIT_INSTR_LEN
1277 | HMVMX_READ_EXIT_INSTR_INFO
1278 | HMVMX_READ_IDT_VECTORING_INFO
1279 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1280 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1281 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1282 | HMVMX_READ_GUEST_LINEAR_ADDR
1283 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1284 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1285 )) == 0);
1286
1287 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1288 {
1289 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1290 {
1291 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1292 AssertRC(rc);
1293 }
1294 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1295 {
1296 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1297 AssertRC(rc);
1298 }
1299 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1300 {
1301 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1302 AssertRC(rc);
1303 }
1304 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1305 {
1306 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1307 AssertRC(rc);
1308 }
1309 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1310 {
1311 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1312 AssertRC(rc);
1313 }
1314 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1315 {
1316 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1317 AssertRC(rc);
1318 }
1319 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1320 {
1321 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1322 AssertRC(rc);
1323 }
1324 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1325 {
1326 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1327 AssertRC(rc);
1328 }
1329 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1330 {
1331 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1332 AssertRC(rc);
1333 }
1334 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1335 {
1336 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1337 AssertRC(rc);
1338 }
1339
1340 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1341 }
1342 else
1343 {
1344 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1345 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1346 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1347 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1348 }
1349}
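/*
 * Usage sketch (field combination is illustrative): callers pass the compile-time mask of
 * everything the exit handler may need so the common path is a straight run of VMREADs;
 * if some fields were already read, the call falls back to vmxHCReadToTransientSlow() and
 * bumps StatReadToTransientFallback:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN
 *                          | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
 */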
1350
1351
1352#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1353/**
1354 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1355 *
1356 * @param pVCpu The cross context virtual CPU structure.
1357 * @param pVmxTransient The VMX-transient structure.
1358 */
1359static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1360{
1361 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1362 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1363 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1364 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1365 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1366 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1367 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1368 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1369 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1370 AssertRC(rc);
1371 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1372 | HMVMX_READ_EXIT_INSTR_LEN
1373 | HMVMX_READ_EXIT_INSTR_INFO
1374 | HMVMX_READ_IDT_VECTORING_INFO
1375 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1376 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1377 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1378 | HMVMX_READ_GUEST_LINEAR_ADDR
1379 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1380}
1381#endif
1382
1383/**
1384 * Verifies that our cached values of the VMCS fields are all consistent with
1385 * what's actually present in the VMCS.
1386 *
1387 * @returns VBox status code.
1388 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1389 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1390 * VMCS content. HMCPU error-field is
1391 * updated, see VMX_VCI_XXX.
1392 * @param pVCpu The cross context virtual CPU structure.
1393 * @param pVmcsInfo The VMCS info. object.
1394 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1395 */
1396static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1397{
1398 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1399
1400 uint32_t u32Val;
1401 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1402 AssertRC(rc);
1403 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1404 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1405 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1406 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1407
1408 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1409 AssertRC(rc);
1410 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1411 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1412 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1413 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1414
1415 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1416 AssertRC(rc);
1417 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1418 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1419 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1420 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1421
1422 /** @todo Currently disabled for nested-guests because we run into bit differences
1423 * with INT_WINDOW, RDTSC/P, see @bugref{10318}. Later, try to figure out
1424 * why and re-enable. */
1425 if (!fIsNstGstVmcs)
1426 {
1427 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1428 AssertRC(rc);
1429 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1430 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1431 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1432 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1433 }
1434
1435 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1436 {
1437 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1438 AssertRC(rc);
1439 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1440 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1441 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1442 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1443 }
1444
1445 uint64_t u64Val;
1446 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1447 {
1448 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1449 AssertRC(rc);
1450 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1451 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1452 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1453 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1454 }
1455
1456 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1457 AssertRC(rc);
1458 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1459 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1460 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1461 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1462
1463 /*
1464 * The TSC offset will only be used when RDTSC is not intercepted.
1465 * Since we don't actively clear it while switching between intercepting or not,
1466 * the value here could be stale.
1467 */
1468 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
1469 {
1470 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1471 AssertRC(rc);
1472 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1473 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1474 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1475 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1476 }
1477
1478 NOREF(pcszVmcs);
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1485 * VMCS.
1486 *
1487 * This is typically required when the guest changes paging mode.
1488 *
1489 * @returns VBox status code.
1490 * @param pVCpu The cross context virtual CPU structure.
1491 * @param pVmxTransient The VMX-transient structure.
1492 *
1493 * @remarks Requires EFER.
1494 * @remarks No-long-jump zone!!!
1495 */
1496static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1497{
1498 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1499 {
1500 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1501 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1502
1503 /*
1504 * VM-entry controls.
1505 */
1506 {
1507 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1508 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1509
1510 /*
1511 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1512 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1513 *
1514 * For nested-guests, this is a mandatory VM-entry control. It's also
1515 * required because we do not want to leak host bits to the nested-guest.
1516 */
1517 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1518
1519 /*
1520 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1521 *
1522             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1523 * required to get the nested-guest working with hardware-assisted VMX execution.
1524 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1525 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1526 * here rather than while merging the guest VMCS controls.
1527 */
1528 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1529 {
1530 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1531 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1532 }
1533 else
1534 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1535
1536 /*
1537 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1538 *
1539 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1540 * regardless of whether the nested-guest VMCS specifies it because we are free to
1541 * load whatever MSRs we require and we do not need to modify the guest visible copy
1542 * of the VM-entry MSR load area.
1543 */
1544 if ( g_fHmVmxSupportsVmcsEfer
1545#ifndef IN_NEM_DARWIN
1546 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1547#endif
1548 )
1549 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1550 else
1551 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1552
1553 /*
1554 * The following should -not- be set (since we're not in SMM mode):
1555 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1556 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1557 */
1558
1559 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1560 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1561
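            /*
             * Sanity check against the capability MSR: every bit we want set (fVal) must also
             * be allowed to be 1, i.e. still set in fZap. If (fVal & fZap) differs from fVal,
             * we are asking for a control the CPU does not support. Illustrative (hypothetical)
             * values: fVal=0x000011ff with fZap=0xffff0fff fails because bit 12 is requested
             * but must be cleared.
             */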
1562 if ((fVal & fZap) == fVal)
1563 { /* likely */ }
1564 else
1565 {
1566 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1567 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1568 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1569 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1570 }
1571
1572 /* Commit it to the VMCS. */
1573 if (pVmcsInfo->u32EntryCtls != fVal)
1574 {
1575 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1576 AssertRC(rc);
1577 pVmcsInfo->u32EntryCtls = fVal;
1578 }
1579 }
1580
1581 /*
1582 * VM-exit controls.
1583 */
1584 {
1585 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1586 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1587
1588 /*
1589 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1590 * supported the 1-setting of this bit.
1591 *
1592             * For nested-guests, we set the "save debug controls" control since the converse
1593 * "load debug controls" is mandatory for nested-guests anyway.
1594 */
1595 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1596
1597 /*
1598 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1599 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1600 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1601 * vmxHCExportHostMsrs().
1602 *
1603 * For nested-guests, we always set this bit as we do not support 32-bit
1604 * hosts.
1605 */
1606 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1607
1608#ifndef IN_NEM_DARWIN
1609 /*
1610 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1611 *
1612 * For nested-guests, we should use the "save IA32_EFER" control if we also
1613 * used the "load IA32_EFER" control while exporting VM-entry controls.
1614 */
1615 if ( g_fHmVmxSupportsVmcsEfer
1616 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1617 {
1618 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1619 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1620 }
1621#endif
1622
1623 /*
1624 * Enable saving of the VMX-preemption timer value on VM-exit.
1625 * For nested-guests, currently not exposed/used.
1626 */
1627 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1628 * the timer value. */
1629 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1630 {
1631 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1632 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1633 }
1634
1635 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1636 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1637
1638 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1639 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1640 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1641
1642 if ((fVal & fZap) == fVal)
1643 { /* likely */ }
1644 else
1645 {
1646 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1647 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1648 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1649 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1650 }
1651
1652 /* Commit it to the VMCS. */
1653 if (pVmcsInfo->u32ExitCtls != fVal)
1654 {
1655 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1656 AssertRC(rc);
1657 pVmcsInfo->u32ExitCtls = fVal;
1658 }
1659 }
1660
1661 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1662 }
1663 return VINF_SUCCESS;
1664}
1665
1666
1667/**
1668 * Sets the TPR threshold in the VMCS.
1669 *
1670 * @param pVCpu The cross context virtual CPU structure.
1671 * @param pVmcsInfo The VMCS info. object.
1672 * @param u32TprThreshold The TPR threshold (task-priority class only).
1673 */
1674DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1675{
1676 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1677 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1678 RT_NOREF(pVmcsInfo);
1679 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1680 AssertRC(rc);
1681}
1682
1683
1684/**
1685 * Exports the guest APIC TPR state into the VMCS.
1686 *
1687 * @param pVCpu The cross context virtual CPU structure.
1688 * @param pVmxTransient The VMX-transient structure.
1689 *
1690 * @remarks No-long-jump zone!!!
1691 */
1692static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1693{
1694 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1695 {
1696 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1697
1698 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1699 if (!pVmxTransient->fIsNestedGuest)
1700 {
1701 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1702 && APICIsEnabled(pVCpu))
1703 {
1704 /*
1705 * Setup TPR shadowing.
1706 */
1707 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1708 {
1709 bool fPendingIntr = false;
1710 uint8_t u8Tpr = 0;
1711 uint8_t u8PendingIntr = 0;
1712 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1713 AssertRC(rc);
1714
1715 /*
1716 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1717 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1718 * priority of the pending interrupt so we can deliver the interrupt. If there
1719 * are no interrupts pending, set threshold to 0 to not cause any
1720 * TPR-below-threshold VM-exits.
1721 */
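                    /*
                     * Worked example (hypothetical values): a pending vector of 0x52 has
                     * priority class 5; with the guest TPR at 0x80 (class 8) the interrupt is
                     * masked, so the threshold is set to 5. Once the guest lowers its TPR
                     * below class 5, VT-x causes a TPR-below-threshold VM-exit and we can
                     * deliver the pending interrupt.
                     */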
1722 uint32_t u32TprThreshold = 0;
1723 if (fPendingIntr)
1724 {
1725 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1726 (which is the Task-Priority Class). */
1727 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1728 const uint8_t u8TprPriority = u8Tpr >> 4;
1729 if (u8PendingPriority <= u8TprPriority)
1730 u32TprThreshold = u8PendingPriority;
1731 }
1732
1733 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1734 }
1735 }
1736 }
1737 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1738 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1739 }
1740}
1741
1742
1743/**
1744 * Gets the guest interruptibility-state and updates related internal eflags
1745 * inhibition state.
1746 *
1747 * @returns Guest's interruptibility-state.
1748 * @param pVCpu The cross context virtual CPU structure.
1749 *
1750 * @remarks No-long-jump zone!!!
1751 */
1752static uint32_t vmxHCGetGuestIntrStateWithUpdate(PVMCPUCC pVCpu)
1753{
1754 uint32_t fIntrState;
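    /* The interruptibility state is a bitfield as defined by the Intel spec: bit 0 is
       blocking by STI, bit 1 blocking by MOV SS, bit 2 blocking by SMI and bit 3 blocking
       by NMI; the VMX_VMCS_GUEST_INT_STATE_BLOCK_XXX values used below map onto these bits. */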
1755
1756 /*
1757 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1758 */
1759 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1760 fIntrState = 0;
1761 else
1762 {
1763 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1765
1766 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1767 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1768 else
1769 {
1770 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1771
1772 /* Block-by-STI must not be set when interrupts are disabled. */
1773 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1774 }
1775 }
1776
1777 /*
1778 * Check if we should inhibit NMI delivery.
1779 */
1780 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1781 { /* likely */ }
1782 else
1783 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1784
1785 /*
1786 * Validate.
1787 */
1788    /* We don't support block-by-SMI yet. */
1789 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1790
1791 return fIntrState;
1792}
1793
1794
1795/**
1796 * Exports the exception intercepts required for guest execution in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmxTransient The VMX-transient structure.
1800 *
1801 * @remarks No-long-jump zone!!!
1802 */
1803static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1804{
1805 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1806 {
1807 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1808 if ( !pVmxTransient->fIsNestedGuest
1809 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1810 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1811 else
1812 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1813
1814 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1815 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1816 }
1817}
1818
1819
1820/**
1821 * Exports the guest's RIP into the guest-state area in the VMCS.
1822 *
1823 * @param pVCpu The cross context virtual CPU structure.
1824 *
1825 * @remarks No-long-jump zone!!!
1826 */
1827static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1828{
1829 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1830 {
1831 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1832
1833 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1834 AssertRC(rc);
1835
1836 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1837 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1838 }
1839}
1840
1841
1842/**
1843 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1844 *
1845 * @param pVCpu The cross context virtual CPU structure.
1846 * @param pVmxTransient The VMX-transient structure.
1847 *
1848 * @remarks No-long-jump zone!!!
1849 */
1850static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1851{
1852 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1853 {
1854 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1855
1856 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1857           of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is no
1858           need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1859 Use 32-bit VMWRITE. */
1860 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1861 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1862 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1863
1864#ifndef IN_NEM_DARWIN
1865 /*
1866 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1867 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1868 * can run the real-mode guest code under Virtual 8086 mode.
1869 */
1870 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1871 if (pVmcsInfo->RealMode.fRealOnV86Active)
1872 {
1873 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1874 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1875 Assert(!pVmxTransient->fIsNestedGuest);
1876 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1877 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1878 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1879 }
1880#else
1881 RT_NOREF(pVmxTransient);
1882#endif
1883
1884 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1885 AssertRC(rc);
1886
1887 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1888 Log4Func(("eflags=%#RX32\n", fEFlags));
1889 }
1890}
1891
1892
1893#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1894/**
1895 * Copies the nested-guest VMCS to the shadow VMCS.
1896 *
1897 * @returns VBox status code.
1898 * @param pVCpu The cross context virtual CPU structure.
1899 * @param pVmcsInfo The VMCS info. object.
1900 *
1901 * @remarks No-long-jump zone!!!
1902 */
1903static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1904{
1905 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1906 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1907
1908 /*
1909 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1910 * current VMCS, as we may try saving guest lazy MSRs.
1911 *
1912 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1913 * calling the import VMCS code which is currently performing the guest MSR reads
1914 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1915 * and the rest of the VMX leave session machinery.
1916 */
1917 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1918
1919 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1920 if (RT_SUCCESS(rc))
1921 {
1922 /*
1923 * Copy all guest read/write VMCS fields.
1924 *
1925 * We don't check for VMWRITE failures here for performance reasons and
1926 * because they are not expected to fail, barring irrecoverable conditions
1927 * like hardware errors.
1928 */
1929 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1930 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1931 {
1932 uint64_t u64Val;
1933 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1934 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1935 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1936 }
1937
1938 /*
1939 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1940 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1941 */
1942 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1943 {
1944 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1945 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1946 {
1947 uint64_t u64Val;
1948 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1949 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1950 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1951 }
1952 }
1953
1954 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1955 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1956 }
1957
1958 ASMSetFlags(fEFlags);
1959 return rc;
1960}
1961
1962
1963/**
1964 * Copies the shadow VMCS to the nested-guest VMCS.
1965 *
1966 * @returns VBox status code.
1967 * @param pVCpu The cross context virtual CPU structure.
1968 * @param pVmcsInfo The VMCS info. object.
1969 *
1970 * @remarks Called with interrupts disabled.
1971 */
1972static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1973{
1974 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1975 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1976 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1977
1978 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1979 if (RT_SUCCESS(rc))
1980 {
1981 /*
1982 * Copy guest read/write fields from the shadow VMCS.
1983 * Guest read-only fields cannot be modified, so no need to copy them.
1984 *
1985 * We don't check for VMREAD failures here for performance reasons and
1986 * because they are not expected to fail, barring irrecoverable conditions
1987 * like hardware errors.
1988 */
1989 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1990 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1991 {
1992 uint64_t u64Val;
1993 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1994 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1995 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1996 }
1997
1998 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1999 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
2000 }
2001 return rc;
2002}
2003
2004
2005/**
2006 * Enables VMCS shadowing for the given VMCS info. object.
2007 *
2008 * @param pVCpu The cross context virtual CPU structure.
2009 * @param pVmcsInfo The VMCS info. object.
2010 *
2011 * @remarks No-long-jump zone!!!
2012 */
2013static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2014{
2015 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2016 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2017 {
2018 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2019 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2020 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2021 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2022 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2023 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2024 Log4Func(("Enabled\n"));
2025 }
2026}
2027
2028
2029/**
2030 * Disables VMCS shadowing for the given VMCS info. object.
2031 *
2032 * @param pVCpu The cross context virtual CPU structure.
2033 * @param pVmcsInfo The VMCS info. object.
2034 *
2035 * @remarks No-long-jump zone!!!
2036 */
2037static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2038{
2039 /*
2040 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2041 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2042 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2043 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2044 *
2045 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2046 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2047 */
2048 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2049 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2050 {
2051 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2052 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2053 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2054 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2055 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2056 Log4Func(("Disabled\n"));
2057 }
2058}
2059#endif
2060
2061
2062/**
2063 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2064 *
2065 * The guest FPU state is always pre-loaded hence we don't need to bother about
2066 * sharing FPU related CR0 bits between the guest and host.
2067 *
2068 * @returns VBox status code.
2069 * @param pVCpu The cross context virtual CPU structure.
2070 * @param pVmxTransient The VMX-transient structure.
2071 *
2072 * @remarks No-long-jump zone!!!
2073 */
2074static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2075{
2076 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2077 {
2078 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2079 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2080
2081 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2082 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2083 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2084 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2085 else
2086 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
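        /* For reference: on typical VT-x hardware the fixed-bit MSRs yield something like
           Cr0Fixed0=0x80000021 (PG, NE and PE must be 1) and Cr0Fixed1=0xffffffff (no bit is
           forced to 0); these values vary by CPU and are given here only as an illustration. */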
2087
2088 if (!pVmxTransient->fIsNestedGuest)
2089 {
2090 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2091 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2092 uint64_t const u64ShadowCr0 = u64GuestCr0;
2093 Assert(!RT_HI_U32(u64GuestCr0));
2094
2095 /*
2096 * Setup VT-x's view of the guest CR0.
2097 */
2098 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2099 if (VM_IS_VMX_NESTED_PAGING(pVM))
2100 {
2101#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2102 if (CPUMIsGuestPagingEnabled(pVCpu))
2103 {
2104 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2105 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2106 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2107 }
2108 else
2109 {
2110 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2111 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2112 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2113 }
2114
2115 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2116 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2117 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2118#endif
2119 }
2120 else
2121 {
2122 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2123 u64GuestCr0 |= X86_CR0_WP;
2124 }
2125
2126 /*
2127 * Guest FPU bits.
2128 *
2129 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2130 * using CR0.TS.
2131 *
2132              * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2133              * set on the first CPUs to support VT-x; it makes no mention of this with regards to UX in the VM-entry checks.
2134 */
2135 u64GuestCr0 |= X86_CR0_NE;
2136
2137 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2138 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2139
2140 /*
2141 * Update exception intercepts.
2142 */
2143 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2144#ifndef IN_NEM_DARWIN
2145 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2146 {
2147 Assert(PDMVmmDevHeapIsEnabled(pVM));
2148 Assert(pVM->hm.s.vmx.pRealModeTSS);
2149 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2150 }
2151 else
2152#endif
2153 {
2154 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2155 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2156 if (fInterceptMF)
2157 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2158 }
2159
2160 /* Additional intercepts for debugging, define these yourself explicitly. */
2161#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2162 uXcptBitmap |= 0
2163 | RT_BIT(X86_XCPT_BP)
2164 | RT_BIT(X86_XCPT_DE)
2165 | RT_BIT(X86_XCPT_NM)
2166 | RT_BIT(X86_XCPT_TS)
2167 | RT_BIT(X86_XCPT_UD)
2168 | RT_BIT(X86_XCPT_NP)
2169 | RT_BIT(X86_XCPT_SS)
2170 | RT_BIT(X86_XCPT_GP)
2171 | RT_BIT(X86_XCPT_PF)
2172 | RT_BIT(X86_XCPT_MF)
2173 ;
2174#elif defined(HMVMX_ALWAYS_TRAP_PF)
2175 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2176#endif
2177 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2178 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2179 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2180 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2181 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2182
2183 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2184 u64GuestCr0 |= fSetCr0;
2185 u64GuestCr0 &= fZapCr0;
2186 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2187
2188 Assert(!RT_HI_U32(u64GuestCr0));
2189 Assert(u64GuestCr0 & X86_CR0_NE);
2190
2191 /* Commit the CR0 and related fields to the guest VMCS. */
2192 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2193 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2194 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2195 {
2196 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2197 AssertRC(rc);
2198 }
2199 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2200 {
2201 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2202 AssertRC(rc);
2203 }
2204
2205 /* Update our caches. */
2206 pVmcsInfo->u32ProcCtls = uProcCtls;
2207 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2208
2209 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2210 }
2211 else
2212 {
2213 /*
2214 * With nested-guests, we may have extended the guest/host mask here since we
2215 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2216 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2217 * originally supplied. We must copy those bits from the nested-guest CR0 into
2218 * the nested-guest CR0 read-shadow.
2219 */
2220 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2221 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2222 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2223
2224 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2225 u64GuestCr0 |= fSetCr0;
2226 u64GuestCr0 &= fZapCr0;
2227 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2228
2229 Assert(!RT_HI_U32(u64GuestCr0));
2230 Assert(u64GuestCr0 & X86_CR0_NE);
2231
2232 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2233 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2234 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2235
2236 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2237 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2238 }
2239
2240 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2241 }
2242
2243 return VINF_SUCCESS;
2244}
2245
2246
2247/**
2248 * Exports the guest control registers (CR3, CR4) into the guest-state area
2249 * in the VMCS.
2250 *
2251 * @returns VBox strict status code.
2252 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2253 * without unrestricted guest access and the VMMDev is not presently
2254 * mapped (e.g. EFI32).
2255 *
2256 * @param pVCpu The cross context virtual CPU structure.
2257 * @param pVmxTransient The VMX-transient structure.
2258 *
2259 * @remarks No-long-jump zone!!!
2260 */
2261static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2262{
2263 int rc = VINF_SUCCESS;
2264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2265
2266 /*
2267 * Guest CR2.
2268 * It's always loaded in the assembler code. Nothing to do here.
2269 */
2270
2271 /*
2272 * Guest CR3.
2273 */
2274 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2275 {
2276 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2277
2278 if (VM_IS_VMX_NESTED_PAGING(pVM))
2279 {
2280#ifndef IN_NEM_DARWIN
2281 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2282 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2283
2284 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2285 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2286 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2287 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2288
2289 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2290 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2291 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
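            /* Illustrative layout (hypothetical PML4 address): with the EPT PML4 table at
               host-physical 0x123456000, memory type WB (6) and a page-walk length of 4
               (encoded as 3 in bits 5:3), the resulting EPTP would be 0x12345601e. */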
2292
2293 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2294 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2295 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2296 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2297 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2298 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2299 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2300
2301 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2302 AssertRC(rc);
2303#endif
2304
2305 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2306 uint64_t u64GuestCr3 = pCtx->cr3;
2307 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2308 || CPUMIsGuestPagingEnabledEx(pCtx))
2309 {
2310 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2311 if (CPUMIsGuestInPAEModeEx(pCtx))
2312 {
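                    /* With EPT, the CPU does not read the PDPTEs from guest memory when CR3 is
                       loaded; VM-entry instead takes them from these four guest-state VMCS
                       fields, which is why they must be kept in sync here. */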
2313 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2314 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2315 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2316 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2317 }
2318
2319 /*
2320 * The guest's view of its CR3 is unblemished with nested paging when the
2321 * guest is using paging or we have unrestricted guest execution to handle
2322 * the guest when it's not using paging.
2323 */
2324 }
2325#ifndef IN_NEM_DARWIN
2326 else
2327 {
2328 /*
2329 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2330 * thinks it accesses physical memory directly, we use our identity-mapped
2331 * page table to map guest-linear to guest-physical addresses. EPT takes care
2332 * of translating it to host-physical addresses.
2333 */
2334 RTGCPHYS GCPhys;
2335 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2336
2337 /* We obtain it here every time as the guest could have relocated this PCI region. */
2338 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2339 if (RT_SUCCESS(rc))
2340 { /* likely */ }
2341 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2342 {
2343 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2344 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2345 }
2346 else
2347 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2348
2349 u64GuestCr3 = GCPhys;
2350 }
2351#endif
2352
2353 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2354 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2355 AssertRC(rc);
2356 }
2357 else
2358 {
2359 Assert(!pVmxTransient->fIsNestedGuest);
2360 /* Non-nested paging case, just use the hypervisor's CR3. */
2361 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2362
2363 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2364 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2365 AssertRC(rc);
2366 }
2367
2368 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2369 }
2370
2371 /*
2372 * Guest CR4.
2373 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2374 */
2375 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2376 {
2377 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2378 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2379
2380 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2381 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2382
2383 /*
2384 * With nested-guests, we may have extended the guest/host mask here (since we
2385 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2386 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2387 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2388 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2389 */
2390 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2391 uint64_t u64GuestCr4 = pCtx->cr4;
2392 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2393 ? pCtx->cr4
2394 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2395 Assert(!RT_HI_U32(u64GuestCr4));
2396
2397#ifndef IN_NEM_DARWIN
2398 /*
2399 * Setup VT-x's view of the guest CR4.
2400 *
2401 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2402 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2403 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2404 *
2405 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2406 */
2407 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2408 {
2409 Assert(pVM->hm.s.vmx.pRealModeTSS);
2410 Assert(PDMVmmDevHeapIsEnabled(pVM));
2411 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2412 }
2413#endif
2414
2415 if (VM_IS_VMX_NESTED_PAGING(pVM))
2416 {
2417 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2418 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2419 {
2420 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2421 u64GuestCr4 |= X86_CR4_PSE;
2422 /* Our identity mapping is a 32-bit page directory. */
2423 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2424 }
2425             /* else: use guest CR4. */
2426 }
2427 else
2428 {
2429 Assert(!pVmxTransient->fIsNestedGuest);
2430
2431 /*
2432                  * The shadow paging mode and the guest paging mode differ; the shadow follows the host
2433                  * paging mode, so we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2434 */
2435 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2436 {
2437 case PGMMODE_REAL: /* Real-mode. */
2438 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2439 case PGMMODE_32_BIT: /* 32-bit paging. */
2440 {
2441 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2442 break;
2443 }
2444
2445 case PGMMODE_PAE: /* PAE paging. */
2446 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2447 {
2448 u64GuestCr4 |= X86_CR4_PAE;
2449 break;
2450 }
2451
2452 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2453 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2454 {
2455#ifdef VBOX_WITH_64_BITS_GUESTS
2456 /* For our assumption in vmxHCShouldSwapEferMsr. */
2457 Assert(u64GuestCr4 & X86_CR4_PAE);
2458 break;
2459#endif
2460 }
2461 default:
2462 AssertFailed();
2463 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2464 }
2465 }
2466
2467 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2468 u64GuestCr4 |= fSetCr4;
2469 u64GuestCr4 &= fZapCr4;
2470
2471 Assert(!RT_HI_U32(u64GuestCr4));
2472 Assert(u64GuestCr4 & X86_CR4_VMXE);
2473
2474 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2475 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2476 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2477
2478#ifndef IN_NEM_DARWIN
2479 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2480 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2481 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2482 {
2483 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2484 hmR0VmxUpdateStartVmFunction(pVCpu);
2485 }
2486#endif
2487
2488 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2489
2490 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2491 }
2492 return rc;
2493}
2494
2495
2496#ifdef VBOX_STRICT
2497/**
2498 * Strict function to validate segment registers.
2499 *
2500 * @param pVCpu The cross context virtual CPU structure.
2501 * @param pVmcsInfo The VMCS info. object.
2502 *
2503 * @remarks Will import guest CR0 on strict builds during validation of
2504 * segments.
2505 */
2506static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2507{
2508 /*
2509 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2510 *
2511 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2512 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2513 * unusable bit and doesn't change the guest-context value.
2514 */
2515 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2516 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2517 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2518 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2519 && ( !CPUMIsGuestInRealModeEx(pCtx)
2520 && !CPUMIsGuestInV86ModeEx(pCtx)))
2521 {
2522 /* Protected mode checks */
2523 /* CS */
2524 Assert(pCtx->cs.Attr.n.u1Present);
2525 Assert(!(pCtx->cs.Attr.u & 0xf00));
2526 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2527 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2528 || !(pCtx->cs.Attr.n.u1Granularity));
2529 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2530 || (pCtx->cs.Attr.n.u1Granularity));
2531 /* CS cannot be loaded with NULL in protected mode. */
2532 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2533 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2534 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2535 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2536 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2537 else
2538             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2539 /* SS */
2540 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2541 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2542 if ( !(pCtx->cr0 & X86_CR0_PE)
2543 || pCtx->cs.Attr.n.u4Type == 3)
2544 {
2545 Assert(!pCtx->ss.Attr.n.u2Dpl);
2546 }
2547 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2550 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2551 Assert(pCtx->ss.Attr.n.u1Present);
2552 Assert(!(pCtx->ss.Attr.u & 0xf00));
2553 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->ss.Attr.n.u1Granularity));
2556 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2557 || (pCtx->ss.Attr.n.u1Granularity));
2558 }
2559 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2560 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2561 {
2562 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2563 Assert(pCtx->ds.Attr.n.u1Present);
2564 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2565 Assert(!(pCtx->ds.Attr.u & 0xf00));
2566 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2567 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2568 || !(pCtx->ds.Attr.n.u1Granularity));
2569 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2570 || (pCtx->ds.Attr.n.u1Granularity));
2571 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2572 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2573 }
2574 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2575 {
2576 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2577 Assert(pCtx->es.Attr.n.u1Present);
2578 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2579 Assert(!(pCtx->es.Attr.u & 0xf00));
2580 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2581 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2582 || !(pCtx->es.Attr.n.u1Granularity));
2583 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2584 || (pCtx->es.Attr.n.u1Granularity));
2585 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2586 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2587 }
2588 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2589 {
2590 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2591 Assert(pCtx->fs.Attr.n.u1Present);
2592 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2593 Assert(!(pCtx->fs.Attr.u & 0xf00));
2594 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2595 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2596 || !(pCtx->fs.Attr.n.u1Granularity));
2597 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2598 || (pCtx->fs.Attr.n.u1Granularity));
2599 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2600 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2601 }
2602 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2603 {
2604 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2605 Assert(pCtx->gs.Attr.n.u1Present);
2606 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2607 Assert(!(pCtx->gs.Attr.u & 0xf00));
2608 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2609 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2610 || !(pCtx->gs.Attr.n.u1Granularity));
2611 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2612 || (pCtx->gs.Attr.n.u1Granularity));
2613 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2614 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2615 }
2616 /* 64-bit capable CPUs. */
2617 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2618 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2619 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2620 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2621 }
2622 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2623 || ( CPUMIsGuestInRealModeEx(pCtx)
2624 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2625 {
2626 /* Real and v86 mode checks. */
2627         /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2628 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2629#ifndef IN_NEM_DARWIN
2630 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2631 {
2632 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2633 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2634 }
2635 else
2636#endif
2637 {
2638 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2639 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2640 }
2641
2642 /* CS */
2643         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2644 Assert(pCtx->cs.u32Limit == 0xffff);
2645 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2646 /* SS */
2647 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2648 Assert(pCtx->ss.u32Limit == 0xffff);
2649 Assert(u32SSAttr == 0xf3);
2650 /* DS */
2651 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2652 Assert(pCtx->ds.u32Limit == 0xffff);
2653 Assert(u32DSAttr == 0xf3);
2654 /* ES */
2655 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2656 Assert(pCtx->es.u32Limit == 0xffff);
2657 Assert(u32ESAttr == 0xf3);
2658 /* FS */
2659 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2660 Assert(pCtx->fs.u32Limit == 0xffff);
2661 Assert(u32FSAttr == 0xf3);
2662 /* GS */
2663 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2664 Assert(pCtx->gs.u32Limit == 0xffff);
2665 Assert(u32GSAttr == 0xf3);
2666 /* 64-bit capable CPUs. */
2667 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2668 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2669 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2670 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2671 }
2672}
2673#endif /* VBOX_STRICT */
2674
2675
2676/**
2677 * Exports a guest segment register into the guest-state area in the VMCS.
2678 *
2679 * @returns VBox status code.
2680 * @param pVCpu The cross context virtual CPU structure.
2681 * @param pVmcsInfo The VMCS info. object.
2682 * @param iSegReg The segment register number (X86_SREG_XXX).
2683 * @param pSelReg Pointer to the segment selector.
2684 *
2685 * @remarks No-long-jump zone!!!
2686 */
2687static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2688{
2689 Assert(iSegReg < X86_SREG_COUNT);
2690
2691 uint32_t u32Access = pSelReg->Attr.u;
2692#ifndef IN_NEM_DARWIN
2693 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2694#endif
2695 {
2696 /*
2697         * The way to differentiate whether this is really a null selector or just a selector
2698         * loaded with 0 in real-mode is by using the segment attributes. A selector loaded in
2699         * real-mode with the value 0 is valid and usable in protected-mode and we should
2700         * -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
2701         * NULL selectors loaded in protected-mode have their attributes set to 0.
2702 */
2703 if (u32Access)
2704 { }
2705 else
2706 u32Access = X86DESCATTR_UNUSABLE;
2707 }
2708#ifndef IN_NEM_DARWIN
2709 else
2710 {
2711 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2712 u32Access = 0xf3;
2713 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2714 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2715 RT_NOREF_PV(pVCpu);
2716 }
2717#else
2718 RT_NOREF(pVmcsInfo);
2719#endif
2720
2721 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
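    /* The "ESCSSSDSFSGS" string is indexed with iSegReg * 2 to yield the two-character
       segment name for the message (X86_SREG_ES=0, CS=1, SS=2, DS=3, FS=4 and GS=5). */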
2722 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2723               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2724
2725 /*
2726 * Commit it to the VMCS.
2727 */
2728 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2729 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2730 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2731 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2732 return VINF_SUCCESS;
2733}
2734
2735
2736/**
2737 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2738 * area in the VMCS.
2739 *
2740 * @returns VBox status code.
2741 * @param pVCpu The cross context virtual CPU structure.
2742 * @param pVmxTransient The VMX-transient structure.
2743 *
2744 * @remarks Will import guest CR0 on strict builds during validation of
2745 * segments.
2746 * @remarks No-long-jump zone!!!
2747 */
2748static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2749{
2750 int rc = VERR_INTERNAL_ERROR_5;
2751#ifndef IN_NEM_DARWIN
2752 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2753#endif
2754 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2755 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2756#ifndef IN_NEM_DARWIN
2757 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2758#endif
2759
2760 /*
2761 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2762 */
2763 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2764 {
2765 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2766 {
2767 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2768#ifndef IN_NEM_DARWIN
2769 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2770 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2771#endif
2772 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2773 AssertRC(rc);
2774 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2775 }
2776
2777 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2778 {
2779 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2780#ifndef IN_NEM_DARWIN
2781 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2782 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2783#endif
2784 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2785 AssertRC(rc);
2786 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2787 }
2788
2789 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2790 {
2791 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2792#ifndef IN_NEM_DARWIN
2793 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2794 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2795#endif
2796 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2797 AssertRC(rc);
2798 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2799 }
2800
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2804#ifndef IN_NEM_DARWIN
2805 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2806 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2807#endif
2808 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2809 AssertRC(rc);
2810 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2811 }
2812
2813 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2814 {
2815 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2816#ifndef IN_NEM_DARWIN
2817 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2818 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2819#endif
2820 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2821 AssertRC(rc);
2822 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2823 }
2824
2825 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2826 {
2827 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2828#ifndef IN_NEM_DARWIN
2829 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2830 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2831#endif
2832 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2833 AssertRC(rc);
2834 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2835 }
2836
2837#ifdef VBOX_STRICT
2838 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2839#endif
2840 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2841 pCtx->cs.Attr.u));
2842 }
2843
2844 /*
2845 * Guest TR.
2846 */
2847 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2848 {
2849 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2850
2851 /*
2852 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2853 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2854 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2855 */
2856 uint16_t u16Sel;
2857 uint32_t u32Limit;
2858 uint64_t u64Base;
2859 uint32_t u32AccessRights;
2860#ifndef IN_NEM_DARWIN
2861 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2862#endif
2863 {
2864 u16Sel = pCtx->tr.Sel;
2865 u32Limit = pCtx->tr.u32Limit;
2866 u64Base = pCtx->tr.u64Base;
2867 u32AccessRights = pCtx->tr.Attr.u;
2868 }
2869#ifndef IN_NEM_DARWIN
2870 else
2871 {
2872 Assert(!pVmxTransient->fIsNestedGuest);
2873 Assert(pVM->hm.s.vmx.pRealModeTSS);
2874 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2875
2876 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2877 RTGCPHYS GCPhys;
2878 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2879 AssertRCReturn(rc, rc);
2880
2881 X86DESCATTR DescAttr;
2882 DescAttr.u = 0;
2883 DescAttr.n.u1Present = 1;
2884 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2885
2886 u16Sel = 0;
2887 u32Limit = HM_VTX_TSS_SIZE;
2888 u64Base = GCPhys;
2889 u32AccessRights = DescAttr.u;
2890 }
2891#endif
2892
2893 /* Validate. */
2894 Assert(!(u16Sel & RT_BIT(2)));
2895 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2896 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2897 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2898 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2899 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2900 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2901 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2902 Assert( (u32Limit & 0xfff) == 0xfff
2903 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2904 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2905 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2906
2907 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2908 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2909 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2910 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2911
2912 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2913 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2914 }
2915
2916 /*
2917 * Guest GDTR.
2918 */
2919 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2920 {
2921 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2922
2923 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2924 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2925
2926 /* Validate. */
2927 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2928
2929 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2930 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2931 }
2932
2933 /*
2934 * Guest LDTR.
2935 */
2936 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2937 {
2938 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2939
2940        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2941 uint32_t u32Access;
2942 if ( !pVmxTransient->fIsNestedGuest
2943 && !pCtx->ldtr.Attr.u)
2944 u32Access = X86DESCATTR_UNUSABLE;
2945 else
2946 u32Access = pCtx->ldtr.Attr.u;
2947
2948 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2949 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2950 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2951 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2952
2953 /* Validate. */
2954 if (!(u32Access & X86DESCATTR_UNUSABLE))
2955 {
2956 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2957 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2958 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2959 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2960 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2961 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2962 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2963 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2964 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2965 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2966 }
2967
2968 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2969 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2970 }
2971
2972 /*
2973 * Guest IDTR.
2974 */
2975 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2976 {
2977 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2978
2979 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2980 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2981
2982 /* Validate. */
2983 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2984
2985 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2986 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2987 }
2988
2989 return VINF_SUCCESS;
2990}
2991
2992
2993/**
2994 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2995 * VM-exit interruption info type.
2996 *
2997 * @returns The IEM exception flags.
2998 * @param uVector The event vector.
2999 * @param uVmxEventType The VMX event type.
3000 *
3001 * @remarks This function currently only constructs flags required for
3002 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
3003 * and CR2 aspects of an exception are not included).
3004 */
3005static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
3006{
3007 uint32_t fIemXcptFlags;
3008 switch (uVmxEventType)
3009 {
3010 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
3011 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
3012 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
3013 break;
3014
3015 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3016 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3017 break;
3018
3019 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3020 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3021 break;
3022
3023 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3024 {
3025 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3026 if (uVector == X86_XCPT_BP)
3027 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3028 else if (uVector == X86_XCPT_OF)
3029 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3030 else
3031 {
3032 fIemXcptFlags = 0;
3033 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3034 }
3035 break;
3036 }
3037
3038 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3039 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3040 break;
3041
3042 default:
3043 fIemXcptFlags = 0;
3044 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3045 break;
3046 }
3047 return fIemXcptFlags;
3048}
3049
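/*
 * A minimal sketch (not from the original file) of how these flags are typically
 * produced in an exit handler: both the IDT-vectoring event and the VM-exit
 * interruption event are decoded and converted, and the two flag sets are then
 * fed to IEMEvaluateRecursiveXcpt (see the remark above). The pVmxTransient
 * fields and decoder macros are assumed to match their use elsewhere in this file.
 */
#if 0 /* illustrative sketch only */
    uint8_t  const uIdtVector       = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
    uint32_t const uIdtVectorType   = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
    uint8_t  const uExitVector      = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    uint32_t const uExitVectorType  = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    uint32_t const fIdtVectorFlags  = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
    uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
    /* fIdtVectorFlags and fExitVectorFlags are then passed to IEMEvaluateRecursiveXcpt(). */
#endif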
3050
3051/**
3052 * Sets an event as a pending event to be injected into the guest.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 * @param u32IntInfo The VM-entry interruption-information field.
3056 * @param cbInstr The VM-entry instruction length in bytes (for
3057 * software interrupts, exceptions and privileged
3058 * software exceptions).
3059 * @param u32ErrCode The VM-entry exception error code.
3060 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3061 * page-fault.
3062 */
3063DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3064 RTGCUINTPTR GCPtrFaultAddress)
3065{
3066 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3067 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3068 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3069 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3070 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3071 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3072}
3073
3074
3075/**
3076 * Sets an external interrupt as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 * @param u8Interrupt The external interrupt vector.
3080 */
3081DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3082{
3083 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3087 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3088 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3089}
3090
3091
3092/**
3093 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3094 *
3095 * @param pVCpu The cross context virtual CPU structure.
3096 */
3097DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3098{
3099 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3103 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3104 Log4Func(("NMI pending injection\n"));
3105}
3106
3107
3108/**
3109 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3110 *
3111 * @param pVCpu The cross context virtual CPU structure.
3112 */
3113DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3114{
3115 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3119 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3120}
3121
3122
3123/**
3124 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3125 *
3126 * @param pVCpu The cross context virtual CPU structure.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3135}
3136
3137
3138/**
3139 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3140 *
3141 * @param pVCpu The cross context virtual CPU structure.
3142 */
3143DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3144{
3145 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3146 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3148 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3149 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3150}
3151
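/*
 * A minimal sketch (not from the original file) showing how a page-fault (#PF)
 * would be queued via vmxHCSetPendingEvent() using the same RT_BF_MAKE pattern
 * as the helpers above; uErrCode and GCPtrFault stand in for the real error
 * code and faulting address (CR2) and are illustrative only.
 */
#if 0 /* illustrative sketch only */
    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, uErrCode, GCPtrFault);
#endif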
3152
3153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3154/**
3155 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3156 *
3157 * @param pVCpu The cross context virtual CPU structure.
3158 * @param u32ErrCode The error code for the general-protection exception.
3159 */
3160DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3161{
3162 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3163 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3164 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3165 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3166 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3167}
3168
3169
3170/**
3171 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3172 *
3173 * @param pVCpu The cross context virtual CPU structure.
3174 * @param u32ErrCode The error code for the stack exception.
3175 */
3176DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3177{
3178 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3179 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3180 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3181 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3182 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3183}
3184#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3185
3186
3187/**
3188 * Fixes up attributes for the specified segment register.
3189 *
3190 * @param pVCpu The cross context virtual CPU structure.
3191 * @param pSelReg The segment register that needs fixing.
3192 * @param pszRegName The register name (for logging and assertions).
3193 */
3194static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3195{
3196 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3197
3198 /*
3199 * If VT-x marks the segment as unusable, most other bits remain undefined:
3200 * - For CS the L, D and G bits have meaning.
3201 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3202 * - For the remaining data segments no bits are defined.
3203 *
3204     * The present bit and the unusable bit have been observed to be set at the
3205 * same time (the selector was supposed to be invalid as we started executing
3206 * a V8086 interrupt in ring-0).
3207 *
3208 * What should be important for the rest of the VBox code, is that the P bit is
3209 * cleared. Some of the other VBox code recognizes the unusable bit, but
3210     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3211 * safe side here, we'll strip off P and other bits we don't care about. If
3212 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3213 *
3214 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3215 */
3216#ifdef VBOX_STRICT
3217 uint32_t const uAttr = pSelReg->Attr.u;
3218#endif
3219
3220 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3221 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3222 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3223
3224#ifdef VBOX_STRICT
3225# ifndef IN_NEM_DARWIN
3226 VMMRZCallRing3Disable(pVCpu);
3227# endif
3228 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3229# ifdef DEBUG_bird
3230 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3231 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3232 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3233# endif
3234# ifndef IN_NEM_DARWIN
3235 VMMRZCallRing3Enable(pVCpu);
3236# endif
3237 NOREF(uAttr);
3238#endif
3239 RT_NOREF2(pVCpu, pszRegName);
3240}
3241
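/*
 * A short illustration (not from the original file) of the masking above: an
 * unusable data segment that VT-x saved with the present bit still set keeps
 * only the unusable, G, D/B, S and type bits after the fix-up. The concrete
 * attribute values below are made up for the example.
 */
#if 0 /* illustrative sketch only */
    CPUMSELREG Tmp;
    RT_ZERO(Tmp);
    Tmp.Attr.u = UINT32_C(0x0001c093);              /* unusable | G | D/B | P | S | type 3 */
    vmxHCFixUnusableSegRegAttr(pVCpu, &Tmp, "TMP");
    Assert(Tmp.Attr.u == UINT32_C(0x0001c013));     /* P (bit 7) stripped, the rest kept    */
#endif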
3242
3243/**
3244 * Imports a guest segment register from the current VMCS into the guest-CPU
3245 * context.
3246 *
3247 * @param pVCpu The cross context virtual CPU structure.
3248 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3249 *
3250 * @remarks Called with interrupts and/or preemption disabled.
3251 */
3252template<uint32_t const a_iSegReg>
3253DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3254{
3255 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3256    /* Check that the macros we depend upon here and in the export parent function work: */
3257#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3258 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3259 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3260 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3261 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3262 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3263 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3264 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3265 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3266 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3267 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3268
3269 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3270
3271 uint16_t u16Sel;
3272 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3273 pSelReg->Sel = u16Sel;
3274 pSelReg->ValidSel = u16Sel;
3275
3276 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3278
3279 uint32_t u32Attr;
3280 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3281 pSelReg->Attr.u = u32Attr;
3282 if (u32Attr & X86DESCATTR_UNUSABLE)
3283 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3284
3285 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3286}
3287
3288
3289/**
3290 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3291 *
3292 * @param pVCpu The cross context virtual CPU structure.
3293 *
3294 * @remarks Called with interrupts and/or preemption disabled.
3295 */
3296DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3297{
3298 uint16_t u16Sel;
3299 uint64_t u64Base;
3300 uint32_t u32Limit, u32Attr;
3301 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3302 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3303 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3305
3306 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3307 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3308 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3309 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3310 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3311 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3312 if (u32Attr & X86DESCATTR_UNUSABLE)
3313 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3314}
3315
3316
3317/**
3318 * Imports the guest TR from the VMCS into the guest-CPU context.
3319 *
3320 * @param pVCpu The cross context virtual CPU structure.
3321 *
3322 * @remarks Called with interrupts and/or preemption disabled.
3323 */
3324DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3325{
3326 uint16_t u16Sel;
3327 uint64_t u64Base;
3328 uint32_t u32Limit, u32Attr;
3329 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3330 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3331 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3332 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3333
3334 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3335 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3336 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3337 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3338 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3339 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3340 /* TR is the only selector that can never be unusable. */
3341 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3342}
3343
3344
3345/**
3346 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3347 *
3348 * @returns The RIP value.
3349 * @param pVCpu The cross context virtual CPU structure.
3350 *
3351 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3352 * @remarks Do -not- call this function directly!
3353 */
3354DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3355{
3356 uint64_t u64Val;
3357 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3358 AssertRC(rc);
3359
3360 pVCpu->cpum.GstCtx.rip = u64Val;
3361
3362 return u64Val;
3363}
3364
3365
3366/**
3367 * Imports the guest RIP from the VMCS into the guest-CPU context.
3368 *
3369 * @param pVCpu The cross context virtual CPU structure.
3370 *
3371 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3372 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3373 * instead!!!
3374 */
3375DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3376{
3377 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3378 {
3379 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3380 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3381 }
3382}
3383
3384
3385/**
3386 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3387 *
3388 * @param pVCpu The cross context virtual CPU structure.
3389 * @param pVmcsInfo The VMCS info. object.
3390 *
3391 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3392 * @remarks Do -not- call this function directly!
3393 */
3394DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3395{
3396 uint64_t fRFlags;
3397 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3398 AssertRC(rc);
3399
3400 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3401 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3402
3403 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3404#ifndef IN_NEM_DARWIN
3405 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3406 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3407    { /* most likely */ }
3408 else
3409 {
3410 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3411 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3412 }
3413#else
3414 RT_NOREF(pVmcsInfo);
3415#endif
3416}
3417
3418
3419/**
3420 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3421 *
3422 * @param pVCpu The cross context virtual CPU structure.
3423 * @param pVmcsInfo The VMCS info. object.
3424 *
3425 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3426 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3427 * instead!!!
3428 */
3429DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3430{
3431 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3432 {
3433 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3434 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3435 }
3436}
3437
3438
3439#ifndef IN_NEM_DARWIN
3440/**
3441 * Imports the guest TSX AUX and certain other MSRs from the VMCS into the guest-CPU
3442 * context.
3443 *
3444 * The other MSRs are in the VM-exit MSR-store.
3445 *
3446 * @returns VBox status code.
3447 * @param pVCpu The cross context virtual CPU structure.
3448 * @param pVmcsInfo The VMCS info. object.
3449 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3450 * unexpected errors). Ignored in NEM/darwin context.
3451 */
3452DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3453{
3454 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3455 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3456 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3457 Assert(pMsrs);
3458 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3459 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3460 for (uint32_t i = 0; i < cMsrs; i++)
3461 {
3462 uint32_t const idMsr = pMsrs[i].u32Msr;
3463 switch (idMsr)
3464 {
3465 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3466 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3467 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3468 default:
3469 {
3470 uint32_t idxLbrMsr;
3471 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3472 if (VM_IS_VMX_LBR(pVM))
3473 {
3474 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3475 {
3476 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3477 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3478 break;
3479 }
3480 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3481 {
3482 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3483 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3484 break;
3485 }
3486 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3487 {
3488 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3489 break;
3490 }
3491 /* Fallthru (no break) */
3492 }
3493 pVCpu->cpum.GstCtx.fExtrn = 0;
3494 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3495 ASMSetFlags(fEFlags);
3496 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3497 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3498 }
3499 }
3500 }
3501 return VINF_SUCCESS;
3502}
3503#endif /* !IN_NEM_DARWIN */
3504
3505
3506/**
3507 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3508 *
3509 * @param pVCpu The cross context virtual CPU structure.
3510 * @param pVmcsInfo The VMCS info. object.
3511 */
3512DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3513{
3514 uint64_t u64Cr0;
3515 uint64_t u64Shadow;
3516 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3517 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3518#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3519 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3520 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3521#else
3522 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3523 {
3524 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3525 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3526 }
3527 else
3528 {
3529 /*
3530 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3531 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3532 * re-construct CR0. See @bugref{9180#c95} for details.
3533 */
3534 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3535 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3536 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3537 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3538 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3539 Assert(u64Cr0 & X86_CR0_NE);
3540 }
3541#endif
3542
3543#ifndef IN_NEM_DARWIN
3544 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3545#endif
3546 CPUMSetGuestCR0(pVCpu, u64Cr0);
3547#ifndef IN_NEM_DARWIN
3548 VMMRZCallRing3Enable(pVCpu);
3549#endif
3550}
3551
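/*
 * A small worked example (not from the original file) of the guest/host mask
 * combine used above: bits owned by the host (set in the CR0 guest/host mask)
 * are taken from the read shadow, everything else from the CR0 value VT-x is
 * actually running with. The numbers are made up for the example.
 */
#if 0 /* illustrative sketch only */
    uint64_t const fCr0Mask = UINT64_C(0x00000020);             /* assume only CR0.NE is host owned      */
    uint64_t const uHwCr0   = UINT64_C(0x80010033);             /* CR0 the CPU is really running with    */
    uint64_t const uShadow  = UINT64_C(0x00000013);             /* CR0 as last written by the guest      */
    uint64_t const uGstCr0  = (uHwCr0 & ~fCr0Mask) | (uShadow & fCr0Mask);
    /* uGstCr0 == 0x80010013: NE comes from the shadow, all other bits from the hardware value. */
#endif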
3552
3553/**
3554 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3555 *
3556 * @param pVCpu The cross context virtual CPU structure.
3557 */
3558DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3559{
3560 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3561 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3562
3563 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3564 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3565 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3566 && CPUMIsGuestPagingEnabledEx(pCtx)))
3567 {
3568 uint64_t u64Cr3;
3569 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3570 if (pCtx->cr3 != u64Cr3)
3571 {
3572 pCtx->cr3 = u64Cr3;
3573 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3574 }
3575
3576 /*
3577 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3578 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3579 */
3580 if (CPUMIsGuestInPAEModeEx(pCtx))
3581 {
3582 X86PDPE aPaePdpes[4];
3583 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3584 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3585 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3586 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3587 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3588 {
3589 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3590 /* PGM now updates PAE PDPTEs while updating CR3. */
3591 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3592 }
3593 }
3594 }
3595}
3596
3597
3598/**
3599 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3600 *
3601 * @param pVCpu The cross context virtual CPU structure.
3602 * @param pVmcsInfo The VMCS info. object.
3603 */
3604DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3605{
3606 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3607 uint64_t u64Cr4;
3608 uint64_t u64Shadow;
3609 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3610 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3611#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3612 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3613 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3614#else
3615 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3616 {
3617 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3618 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3619 }
3620 else
3621 {
3622 /*
3623 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3624 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3625 * re-construct CR4. See @bugref{9180#c95} for details.
3626 */
3627 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3628 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3629 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3630 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3631 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3632 Assert(u64Cr4 & X86_CR4_VMXE);
3633 }
3634#endif
3635 pCtx->cr4 = u64Cr4;
3636}
3637
3638
3639/**
3640 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3641 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3642 */
3643DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3644{
3645 /*
3646 * We must import RIP here to set our EM interrupt-inhibited state.
3647 * We also import RFLAGS as our code that evaluates pending interrupts
3648 * before VM-entry requires it.
3649 */
3650 vmxHCImportGuestRip(pVCpu);
3651 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3652
3653 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3654 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3655 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3656 pVCpu->cpum.GstCtx.rip);
3657 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3658}
3659
3660
3661/**
3662 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3663 * context.
3664 *
3665 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3666 *
3667 * @param pVCpu The cross context virtual CPU structure.
3668 * @param pVmcsInfo The VMCS info. object.
3669 *
3670 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3671 * do not log!
3672 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3673 * instead!!!
3674 */
3675DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3676{
3677 uint32_t u32Val;
3678 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3679 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3680 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3681 if (!u32Val)
3682 {
3683 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3684 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3685 }
3686 else
3687 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3688}
3689
3690
3691/**
3692 * Worker for VMXR0ImportStateOnDemand.
3693 *
3694 * @returns VBox status code.
3695 * @param pVCpu The cross context virtual CPU structure.
3696 * @param pVmcsInfo The VMCS info. object.
3697 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3698 */
3699static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3700{
3701 int rc = VINF_SUCCESS;
3702 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3703 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3704 uint32_t u32Val;
3705
3706 /*
3707     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3708 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3709 * neither are other host platforms.
3710 *
3711 * Committing this temporarily as it prevents BSOD.
3712 *
3713 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3714 */
3715#ifdef RT_OS_WINDOWS
3716 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3717 return VERR_HM_IPE_1;
3718#endif
3719
3720 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3721
3722#ifndef IN_NEM_DARWIN
3723 /*
3724 * We disable interrupts to make the updating of the state and in particular
3725 * the fExtrn modification atomic wrt to preemption hooks.
3726 */
3727 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3728#endif
3729
3730 fWhat &= pCtx->fExtrn;
3731 if (fWhat)
3732 {
3733 do
3734 {
3735 if (fWhat & CPUMCTX_EXTRN_RIP)
3736 vmxHCImportGuestRip(pVCpu);
3737
3738 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3739 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3740
3741 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3742 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3743 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3744
3745 if (fWhat & CPUMCTX_EXTRN_RSP)
3746 {
3747 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3748 AssertRC(rc);
3749 }
3750
3751 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3752 {
3753 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3754#ifndef IN_NEM_DARWIN
3755 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3756#else
3757 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3758#endif
3759 if (fWhat & CPUMCTX_EXTRN_CS)
3760 {
3761 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3762 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3763 if (fRealOnV86Active)
3764 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3765 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3766 }
3767 if (fWhat & CPUMCTX_EXTRN_SS)
3768 {
3769 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3770 if (fRealOnV86Active)
3771 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3772 }
3773 if (fWhat & CPUMCTX_EXTRN_DS)
3774 {
3775 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3776 if (fRealOnV86Active)
3777 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3778 }
3779 if (fWhat & CPUMCTX_EXTRN_ES)
3780 {
3781 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3782 if (fRealOnV86Active)
3783 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3784 }
3785 if (fWhat & CPUMCTX_EXTRN_FS)
3786 {
3787 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3788 if (fRealOnV86Active)
3789 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3790 }
3791 if (fWhat & CPUMCTX_EXTRN_GS)
3792 {
3793 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3794 if (fRealOnV86Active)
3795 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3796 }
3797 }
3798
3799 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3800 {
3801 if (fWhat & CPUMCTX_EXTRN_LDTR)
3802 vmxHCImportGuestLdtr(pVCpu);
3803
3804 if (fWhat & CPUMCTX_EXTRN_GDTR)
3805 {
3806 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3807 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3808 pCtx->gdtr.cbGdt = u32Val;
3809 }
3810
3811 /* Guest IDTR. */
3812 if (fWhat & CPUMCTX_EXTRN_IDTR)
3813 {
3814 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3815 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3816 pCtx->idtr.cbIdt = u32Val;
3817 }
3818
3819 /* Guest TR. */
3820 if (fWhat & CPUMCTX_EXTRN_TR)
3821 {
3822#ifndef IN_NEM_DARWIN
3823 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3824                   so we don't need to import that one. */
3825 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3826#endif
3827 vmxHCImportGuestTr(pVCpu);
3828 }
3829 }
3830
3831 if (fWhat & CPUMCTX_EXTRN_DR7)
3832 {
3833#ifndef IN_NEM_DARWIN
3834 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3835#endif
3836 {
3837 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3838 AssertRC(rc);
3839 }
3840 }
3841
3842 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3843 {
3844 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3845 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3846 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3847 pCtx->SysEnter.cs = u32Val;
3848 }
3849
3850#ifndef IN_NEM_DARWIN
3851 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3852 {
3853 if ( pVM->hmr0.s.fAllow64BitGuests
3854 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3855 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3856 }
3857
3858 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3859 {
3860 if ( pVM->hmr0.s.fAllow64BitGuests
3861 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3862 {
3863 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3864 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3865 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3866 }
3867 }
3868
3869 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3870 {
3871 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3872 AssertRCReturn(rc, rc);
3873 }
3874#else
3875 NOREF(pVM);
3876#endif
3877
3878 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3879 {
3880 if (fWhat & CPUMCTX_EXTRN_CR0)
3881 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3882
3883 if (fWhat & CPUMCTX_EXTRN_CR4)
3884 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3885
3886 if (fWhat & CPUMCTX_EXTRN_CR3)
3887 vmxHCImportGuestCr3(pVCpu);
3888 }
3889
3890#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3891 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3892 {
3893 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3894 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3895 {
3896 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3897 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3898 if (RT_SUCCESS(rc))
3899 { /* likely */ }
3900 else
3901 break;
3902 }
3903 }
3904#endif
3905 } while (0);
3906
3907 if (RT_SUCCESS(rc))
3908 {
3909 /* Update fExtrn. */
3910 pCtx->fExtrn &= ~fWhat;
3911
3912 /* If everything has been imported, clear the HM keeper bit. */
3913 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3914 {
3915#ifndef IN_NEM_DARWIN
3916 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3917#else
3918 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3919#endif
3920 Assert(!pCtx->fExtrn);
3921 }
3922 }
3923 }
3924#ifndef IN_NEM_DARWIN
3925 else
3926 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3927
3928 /*
3929 * Restore interrupts.
3930 */
3931 ASMSetFlags(fEFlags);
3932#endif
3933
3934 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3935
3936 if (RT_SUCCESS(rc))
3937 { /* likely */ }
3938 else
3939 return rc;
3940
3941 /*
3942 * Honor any pending CR3 updates.
3943 *
3944 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3945 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3946 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3947 *
3948 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3949 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3950 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3951 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3952 *
3953 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3954 *
3955 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3956 */
3957 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3958#ifndef IN_NEM_DARWIN
3959 && VMMRZCallRing3IsEnabled(pVCpu)
3960#endif
3961 )
3962 {
3963 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3964 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3965 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3966 }
3967
3968 return VINF_SUCCESS;
3969}
3970
3971
3972/**
3973 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3974 *
3975 * @returns VBox status code.
3976 * @param pVCpu The cross context virtual CPU structure.
3977 * @param pVmcsInfo The VMCS info. object.
3978 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3979 * in NEM/darwin context.
3980 * @tparam a_fWhat What to import, zero or more bits from
3981 * HMVMX_CPUMCTX_EXTRN_ALL.
3982 */
3983template<uint64_t const a_fWhat>
3984static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3985{
3986 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3987 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3988 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3989 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3990
3991 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3992
3993 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3994
3995    /* RIP and RFLAGS may have been imported already by the post exit code
3996       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so the code
3997       below checks fExtrn and only re-fetches them when still needed. */
3998 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3999 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
4000 {
4001 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
4002 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
4003
4004 if (a_fWhat & CPUMCTX_EXTRN_RIP)
4005 {
4006 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
4007 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
4008 else
4009 vmxHCImportGuestCoreRip(pVCpu);
4010 }
4011 }
4012
4013 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
4014 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
4015 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
4016
4017 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
4018 {
4019 if (a_fWhat & CPUMCTX_EXTRN_CS)
4020 {
4021 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
4022 /** @todo try get rid of this carp, it smells and is probably never ever
4023 * used: */
4024 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
4025 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
4026 {
4027 vmxHCImportGuestCoreRip(pVCpu);
4028 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
4029 }
4030 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
4031 }
4032 if (a_fWhat & CPUMCTX_EXTRN_SS)
4033 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
4034 if (a_fWhat & CPUMCTX_EXTRN_DS)
4035 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
4036 if (a_fWhat & CPUMCTX_EXTRN_ES)
4037 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
4038 if (a_fWhat & CPUMCTX_EXTRN_FS)
4039 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
4040 if (a_fWhat & CPUMCTX_EXTRN_GS)
4041 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
4042
4043 /* Guest TR.
4044 Real-mode emulation using virtual-8086 mode has the fake TSS
4045           (pRealModeTSS) in TR, so we don't need to import that one. */
4046#ifndef IN_NEM_DARWIN
4047 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4048 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4049 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4050#else
4051 if (a_fWhat & CPUMCTX_EXTRN_TR)
4052#endif
4053 vmxHCImportGuestTr(pVCpu);
4054
4055#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4056 if (fRealOnV86Active)
4057 {
4058 if (a_fWhat & CPUMCTX_EXTRN_CS)
4059 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4060 if (a_fWhat & CPUMCTX_EXTRN_SS)
4061 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4062 if (a_fWhat & CPUMCTX_EXTRN_DS)
4063 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4064 if (a_fWhat & CPUMCTX_EXTRN_ES)
4065 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4066 if (a_fWhat & CPUMCTX_EXTRN_FS)
4067 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4068 if (a_fWhat & CPUMCTX_EXTRN_GS)
4069 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4070 }
4071#endif
4072 }
4073
4074 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4075 {
4076 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4077 AssertRC(rc);
4078 }
4079
4080 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4081 vmxHCImportGuestLdtr(pVCpu);
4082
4083 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4084 {
4085 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4086 uint32_t u32Val;
4087 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4088 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4089 }
4090
4091 /* Guest IDTR. */
4092 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4093 {
4094 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4095 uint32_t u32Val;
4096 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4097 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
4098 }
4099
4100 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4101 {
4102#ifndef IN_NEM_DARWIN
4103 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4104#endif
4105 {
4106 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4107 AssertRC(rc);
4108 }
4109 }
4110
4111 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4112 {
4113 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4114 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4115 uint32_t u32Val;
4116 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4117 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4118 }
4119
4120#ifndef IN_NEM_DARWIN
4121 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4122 {
4123 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4124 && pVM->hmr0.s.fAllow64BitGuests)
4125 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4126 }
4127
4128 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4129 {
4130 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4131 && pVM->hmr0.s.fAllow64BitGuests)
4132 {
4133 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4134 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4135 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4136 }
4137 }
4138
4139 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4140 {
4141 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4142 AssertRCReturn(rc1, rc1);
4143 }
4144#else
4145 NOREF(pVM);
4146#endif
4147
4148 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4149 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4150
4151 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4152 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4153
4154 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4155 vmxHCImportGuestCr3(pVCpu);
4156
4157#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4158 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4159 {
4160 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4161 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4162 {
4163 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4164 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4165 AssertRCReturn(rc, rc);
4166 }
4167 }
4168#endif
4169
4170 /* Update fExtrn. */
4171 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4172
4173 /* If everything has been imported, clear the HM keeper bit. */
4174 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4175 {
4176#ifndef IN_NEM_DARWIN
4177 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4178#else
4179 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4180#endif
4181 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4182 }
4183
4184 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4185
4186 /*
4187 * Honor any pending CR3 updates.
4188 *
4189 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4190 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4191 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4192 *
4193 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4194 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4195 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4196 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4197 *
4198 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4199 *
4200 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4201 */
4202#ifndef IN_NEM_DARWIN
4203 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4204 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4205 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4206 return VINF_SUCCESS;
4207 ASMSetFlags(fEFlags);
4208#else
4209 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4210 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4211 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4212 return VINF_SUCCESS;
4213 RT_NOREF_PV(fEFlags);
4214#endif
4215
4216 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4217 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4218 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4219 return VINF_SUCCESS;
4220}
4221
4222
4223/**
4224 * Internal state fetcher.
4225 *
4226 * @returns VBox status code.
4227 * @param pVCpu The cross context virtual CPU structure.
4228 * @param pVmcsInfo The VMCS info. object.
4229 * @param pszCaller For logging.
4230 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4231 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4232 * already. This is ORed together with @a a_fWhat when
4233 * calculating what needs fetching (just for safety).
4234 * @tparam a_fDonePostExit  What's ASSUMED to have been retrieved by
4235 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4236 * already. This is ORed together with @a a_fWhat when
4237 * calculating what needs fetching (just for safety).
4238 */
4239template<uint64_t const a_fWhat,
4240 uint64_t const a_fDoneLocal = 0,
4241 uint64_t const a_fDonePostExit = 0
4242#ifndef IN_NEM_DARWIN
4243 | CPUMCTX_EXTRN_INHIBIT_INT
4244 | CPUMCTX_EXTRN_INHIBIT_NMI
4245# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4246 | HMVMX_CPUMCTX_EXTRN_ALL
4247# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4248 | CPUMCTX_EXTRN_RFLAGS
4249# endif
4250#else /* IN_NEM_DARWIN */
4251 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4252#endif /* IN_NEM_DARWIN */
4253>
4254DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4255{
4256 RT_NOREF_PV(pszCaller);
4257 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4258 {
4259#ifndef IN_NEM_DARWIN
4260 /*
4261 * We disable interrupts to make the updating of the state and in particular
4262 * the fExtrn modification atomic wrt to preemption hooks.
4263 */
4264 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4265#else
4266 RTCCUINTREG const fEFlags = 0;
4267#endif
4268
4269 /*
4270 * We combine all three parameters and take the (probably) inlined optimized
4271 * code path for the new things specified in a_fWhat.
4272 *
4273 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4274 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4275 * also take the streamlined path when both of these are cleared in fExtrn
4276 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4277 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4278 */
4279 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4280 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4281 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4282 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4283 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4284 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4285 {
4286 int const rc = vmxHCImportGuestStateInner< a_fWhat
4287 & HMVMX_CPUMCTX_EXTRN_ALL
4288 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4289#ifndef IN_NEM_DARWIN
4290 ASMSetFlags(fEFlags);
4291#endif
4292 return rc;
4293 }
4294
4295#ifndef IN_NEM_DARWIN
4296 ASMSetFlags(fEFlags);
4297#endif
4298
4299 /*
4300 * We shouldn't normally get here, but it may happen when executing
4301 * in the debug run-loops. Typically, everything should already have
4302 * been fetched then. Otherwise call the fallback state import function.
4303 */
4304 if (fWhatToDo == 0)
4305 { /* hope the cause was the debug loop or something similar */ }
4306 else
4307 {
4308 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4309 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4310 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4311 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4312 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4313 }
4314 }
4315 return VINF_SUCCESS;
4316}
4317
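/*
 * A minimal usage sketch (not from the original file): an exit handler that
 * needs RIP and the segment registers would request them like this, the
 * template argument selecting the inlined fast path above. The exact set of
 * CPUMCTX_EXTRN_XXX bits is illustrative.
 */
#if 0 /* illustrative sketch only */
    int const rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_SREG_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);
#endif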
4318
4319/**
4320 * Check per-VM and per-VCPU force flag actions that require us to go back to
4321 * ring-3 for one reason or another.
4322 *
4323 * @returns Strict VBox status code (i.e. informational status codes too)
4324 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4325 * ring-3.
4326 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4327 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4328 * interrupts)
4329 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4330 * all EMTs to be in ring-3.
4331 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4332 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4333 * to the EM loop.
4334 *
4335 * @param pVCpu The cross context virtual CPU structure.
4336 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4337 * @param fStepping Whether we are single-stepping the guest using the
4338 * hypervisor debugger.
4339 *
4340 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4341 * is no longer in VMX non-root mode.
4342 */
4343static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4344{
4345#ifndef IN_NEM_DARWIN
4346 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4347#endif
4348
4349 /*
4350 * Update pending interrupts into the APIC's IRR.
4351 */
4352 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4353 APICUpdatePendingInterrupts(pVCpu);
4354
4355 /*
4356 * Anything pending? Should be more likely than not if we're doing a good job.
4357 */
4358 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4359 if ( !fStepping
4360 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4361 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4362 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4363 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4364 return VINF_SUCCESS;
4365
4366    /* Pending PGM CR3 sync. */
4367 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4368 {
4369 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4370 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4371 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4372 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4373 if (rcStrict != VINF_SUCCESS)
4374 {
4375 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4376 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4377 return rcStrict;
4378 }
4379 }
4380
4381 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4382 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4383 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4384 {
4385 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4386 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4387 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4388 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4389 return rc;
4390 }
4391
4392 /* Pending VM request packets, such as hardware interrupts. */
4393 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4394 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4395 {
4396 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4397 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4398 return VINF_EM_PENDING_REQUEST;
4399 }
4400
4401 /* Pending PGM pool flushes. */
4402 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4403 {
4404 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4405 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4406 return VINF_PGM_POOL_FLUSH_PENDING;
4407 }
4408
4409 /* Pending DMA requests. */
4410 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4411 {
4412 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4413 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4414 return VINF_EM_RAW_TO_R3;
4415 }
4416
4417#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4418 /*
4419 * Pending nested-guest events.
4420 *
4421     * Please note that the priority of these events is specified and important.
4422 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4423 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4424 *
4425 * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
4426 * handled here. They'll be handled by the hardware while executing the nested-guest
4427     * or by us when we inject events that are not part of the VM-entry of the nested-guest.
4428 */
4429 if (fIsNestedGuest)
4430 {
4431 /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
4432 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4433 {
4434 Log4Func(("Pending nested-guest APIC-write\n"));
4435 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4436 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4437 if ( rcStrict == VINF_SUCCESS
4438 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4439 return rcStrict;
4440 }
4441
4442 /* Pending nested-guest monitor-trap flag (MTF). */
4443 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4444 {
4445 Log4Func(("Pending nested-guest MTF\n"));
4446 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4447 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4448 return rcStrict;
4449 }
4450
4451 /* Pending nested-guest VMX-preemption timer expired. */
4452 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4453 {
4454 Log4Func(("Pending nested-guest preempt timer\n"));
4455 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4456 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4457 return rcStrict;
4458 }
4459 }
4460#else
4461 NOREF(fIsNestedGuest);
4462#endif
4463
4464 return VINF_SUCCESS;
4465}
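
/*
 * Illustrative usage sketch (an assumption about the caller, kept out of the build
 * with #if 0): the VM-run loop is expected to call vmxHCCheckForceFlags() before
 * each world switch and to propagate anything other than VINF_SUCCESS so that we
 * drop back to ring-3 / the EM loop with the informational status code.
 */
#if 0
    VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, false /* fIsNestedGuest */, false /* fStepping */);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* e.g. VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3 or VINF_EM_NO_MEMORY. */
#endif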
4466
4467
4468/**
4469 * Converts any TRPM trap into a pending HM event. This is typically used when
4470 * entering from ring-3 (not longjmp returns).
4471 *
4472 * @param pVCpu The cross context virtual CPU structure.
4473 */
4474static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4475{
4476 Assert(TRPMHasTrap(pVCpu));
4477 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4478
4479 uint8_t uVector;
4480 TRPMEVENT enmTrpmEvent;
4481 uint32_t uErrCode;
4482 RTGCUINTPTR GCPtrFaultAddress;
4483 uint8_t cbInstr;
4484 bool fIcebp;
4485
4486 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4487 AssertRC(rc);
4488
4489 uint32_t u32IntInfo;
4490 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4491 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4492
4493 rc = TRPMResetTrap(pVCpu);
4494 AssertRC(rc);
4495 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4496 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4497
4498 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4499}
4500
4501
4502/**
4503 * Converts the pending HM event into a TRPM trap.
4504 *
4505 * @param pVCpu The cross context virtual CPU structure.
4506 */
4507static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4508{
4509 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4510
4511 /* If a trap was already pending, we did something wrong! */
4512 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4513
4514 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4515 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4516 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4517
4518 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4519
4520 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4521 AssertRC(rc);
4522
4523 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4524 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4525
4526 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4527 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4528 else
4529 {
4530 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4531 switch (uVectorType)
4532 {
4533 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4534 TRPMSetTrapDueToIcebp(pVCpu);
4535 RT_FALL_THRU();
4536 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4537 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4538 {
4539 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4540 || ( uVector == X86_XCPT_BP /* INT3 */
4541 || uVector == X86_XCPT_OF /* INTO */
4542 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4543 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4544 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4545 break;
4546 }
4547 }
4548 }
4549
4550 /* We're now done converting the pending event. */
4551 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4552}
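
/*
 * Illustrative usage sketch (assumed call sites, kept out of the build with #if 0):
 * the two converters above form a round trip around ring-3 transitions. On the way
 * in from ring-3 a queued TRPM trap becomes a pending HM event; on the way back out
 * a still-pending HM event is handed back to TRPM.
 */
#if 0
    /* Entering hardware-assisted execution from ring-3: */
    if (TRPMHasTrap(pVCpu))
        vmxHCTrpmTrapToPendingEvent(pVCpu);

    /* Returning to ring-3 with an event still pending: */
    if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
        vmxHCPendingEventToTrpmTrap(pVCpu);
#endif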
4553
4554
4555/**
4556 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4557 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4558 *
4559 * @param pVCpu The cross context virtual CPU structure.
4560 * @param pVmcsInfo The VMCS info. object.
4561 */
4562static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4563{
4564 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4565 {
4566 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4567 {
4568 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4569 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4570 AssertRC(rc);
4571 }
4572 Log4Func(("Enabled interrupt-window exiting\n"));
4573    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4574}
4575
4576
4577/**
4578 * Clears the interrupt-window exiting control in the VMCS.
4579 *
4580 * @param pVCpu The cross context virtual CPU structure.
4581 * @param pVmcsInfo The VMCS info. object.
4582 */
4583DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4584{
4585 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4586 {
4587 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4588 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4589 AssertRC(rc);
4590 Log4Func(("Disabled interrupt-window exiting\n"));
4591 }
4592}
4593
4594
4595/**
4596 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4597 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4598 *
4599 * @param pVCpu The cross context virtual CPU structure.
4600 * @param pVmcsInfo The VMCS info. object.
4601 */
4602static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4603{
4604 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4605 {
4606 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4607 {
4608 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4610 AssertRC(rc);
4611 Log4Func(("Enabled NMI-window exiting\n"));
4612 }
4613 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4614}
4615
4616
4617/**
4618 * Clears the NMI-window exiting control in the VMCS.
4619 *
4620 * @param pVCpu The cross context virtual CPU structure.
4621 * @param pVmcsInfo The VMCS info. object.
4622 */
4623DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4624{
4625 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4626 {
4627 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4628 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4629 AssertRC(rc);
4630 Log4Func(("Disabled NMI-window exiting\n"));
4631 }
4632}
4633
4634
4635/**
4636 * Injects an event into the guest upon VM-entry by updating the relevant fields
4637 * in the VM-entry area in the VMCS.
4638 *
4639 * @returns Strict VBox status code (i.e. informational status codes too).
4640 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4641 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure.
4644 * @param pVmcsInfo The VMCS info object.
4645 * @param   fIsNestedGuest  Flag whether the event injection is for a nested guest.
4646 * @param pEvent The event being injected.
4647 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4648 *                          will be updated if necessary. This cannot be NULL.
4649 * @param fStepping Whether we're single-stepping guest execution and should
4650 * return VINF_EM_DBG_STEPPED if the event is injected
4651 * directly (registers modified by us, not by hardware on
4652 * VM-entry).
4653 */
4654static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4655 bool fStepping, uint32_t *pfIntrState)
4656{
4657 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4658 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4659 Assert(pfIntrState);
4660
4661#ifdef IN_NEM_DARWIN
4662 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4663#endif
4664
4665 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4666 uint32_t u32IntInfo = pEvent->u64IntInfo;
4667 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4668 uint32_t const cbInstr = pEvent->cbInstr;
4669 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4670 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4671 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4672
4673#ifdef VBOX_STRICT
4674 /*
4675 * Validate the error-code-valid bit for hardware exceptions.
4676 * No error codes for exceptions in real-mode.
4677 *
4678 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4679 */
4680 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4681 && !CPUMIsGuestInRealModeEx(pCtx))
4682 {
4683 switch (uVector)
4684 {
4685 case X86_XCPT_PF:
4686 case X86_XCPT_DF:
4687 case X86_XCPT_TS:
4688 case X86_XCPT_NP:
4689 case X86_XCPT_SS:
4690 case X86_XCPT_GP:
4691 case X86_XCPT_AC:
4692 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4693 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4694 RT_FALL_THRU();
4695 default:
4696 break;
4697 }
4698 }
4699
4700 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4701 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4702 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4703#endif
4704
4705 RT_NOREF(uVector);
4706 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4707 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4708 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4709 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4710 {
4711 Assert(uVector <= X86_XCPT_LAST);
4712 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4713 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4714 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4715 }
4716 else
4717 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4718
4719 /*
4720 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4721 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4722 * interrupt handler in the (real-mode) guest.
4723 *
4724 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4725 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4726 */
4727 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4728 {
4729#ifndef IN_NEM_DARWIN
4730 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4731#endif
4732 {
4733 /*
4734 * For CPUs with unrestricted guest execution enabled and with the guest
4735 * in real-mode, we must not set the deliver-error-code bit.
4736 *
4737 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4738 */
4739 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4740 }
4741#ifndef IN_NEM_DARWIN
4742 else
4743 {
4744 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4745 Assert(PDMVmmDevHeapIsEnabled(pVM));
4746 Assert(pVM->hm.s.vmx.pRealModeTSS);
4747 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4748
4749 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4750 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4751 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4752 AssertRCReturn(rc2, rc2);
4753
4754 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4755 size_t const cbIdtEntry = sizeof(X86IDTR16);
4756 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4757 {
4758 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4759 if (uVector == X86_XCPT_DF)
4760 return VINF_EM_RESET;
4761
4762 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4763 No error codes for exceptions in real-mode. */
4764 if (uVector == X86_XCPT_GP)
4765 {
4766 static HMEVENT const s_EventXcptDf
4767 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4768 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4769 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4770 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4771 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4772 }
4773
4774 /*
4775 * If we're injecting an event with no valid IDT entry, inject a #GP.
4776 * No error codes for exceptions in real-mode.
4777 *
4778 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4779 */
4780 static HMEVENT const s_EventXcptGp
4781 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4782 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4783 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4784 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4785 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4786 }
4787
4788 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4789 uint16_t uGuestIp = pCtx->ip;
4790 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4791 {
4792 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4793 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4794 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4795 }
4796 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4797 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4798
4799 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4800 X86IDTR16 IdtEntry;
4801 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4802 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4803 AssertRCReturn(rc2, rc2);
4804
4805 /* Construct the stack frame for the interrupt/exception handler. */
4806 VBOXSTRICTRC rcStrict;
4807 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4808 if (rcStrict == VINF_SUCCESS)
4809 {
4810 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4811 if (rcStrict == VINF_SUCCESS)
4812 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4813 }
4814
4815 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4816 if (rcStrict == VINF_SUCCESS)
4817 {
4818 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4819 pCtx->rip = IdtEntry.offSel;
4820 pCtx->cs.Sel = IdtEntry.uSel;
4821 pCtx->cs.ValidSel = IdtEntry.uSel;
4822 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4823 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4824 && uVector == X86_XCPT_PF)
4825 pCtx->cr2 = GCPtrFault;
4826
4827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4828 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4829 | HM_CHANGED_GUEST_RSP);
4830
4831 /*
4832 * If we delivered a hardware exception (other than an NMI) and if there was
4833 * block-by-STI in effect, we should clear it.
4834 */
4835 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4836 {
4837 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4838 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4839 Log4Func(("Clearing inhibition due to STI\n"));
4840 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4841 }
4842
4843 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4844 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4845
4846 /*
4847 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4848 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4849 */
4850 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4851
4852 /*
4853 * If we eventually support nested-guest execution without unrestricted guest execution,
4854 * we should set fInterceptEvents here.
4855 */
4856 Assert(!fIsNestedGuest);
4857
4858 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4859 if (fStepping)
4860 rcStrict = VINF_EM_DBG_STEPPED;
4861 }
4862 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4863 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4864 return rcStrict;
4865 }
4866#else
4867 RT_NOREF(pVmcsInfo);
4868#endif
4869 }
4870
4871 /*
4872 * Validate.
4873 */
4874 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4875 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4876
4877 /*
4878 * Inject the event into the VMCS.
4879 */
4880 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4881 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4882 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4883 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4884 AssertRC(rc);
4885
4886 /*
4887 * Update guest CR2 if this is a page-fault.
4888 */
4889 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4890 pCtx->cr2 = GCPtrFault;
4891
4892 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4893 return VINF_SUCCESS;
4894}
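
/*
 * Illustrative sketch (a hypothetical #UD event, kept out of the build with #if 0):
 * callers encode the 32-bit VM-entry interruption-information in HMEVENT::u64IntInfo
 * with the same RT_BF_MAKE pattern used for the #DF/#GP events above before handing
 * the event to vmxHCInjectEventVmcs() or vmxHCSetPendingEvent().
 */
#if 0
    HMEVENT EventXcptUd;
    RT_ZERO(EventXcptUd);
    EventXcptUd.u64IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
                           | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                           | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
                           | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    /* #UD has no error code; cbInstr and GCPtrFaultAddress stay zero. */
#endif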
4895
4896
4897/**
4898 * Evaluates the event to be delivered to the guest and sets it as the pending
4899 * event.
4900 *
4901 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4902 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4903 * NOT restore these force-flags.
4904 *
4905 * @returns Strict VBox status code (i.e. informational status codes too).
4906 * @param pVCpu The cross context virtual CPU structure.
4907 * @param pVmcsInfo The VMCS information structure.
4908 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4909 * state.
4910 */
4911static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4912{
4913 Assert(pfIntrState);
4914 Assert(!TRPMHasTrap(pVCpu));
4915
4916 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4917
4918 /*
4919 * Evaluate if a new event needs to be injected.
4920 * An event that's already pending has already performed all necessary checks.
4921 */
4922 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4923 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4924 {
4925 /** @todo SMI. SMIs take priority over NMIs. */
4926
4927 /*
4928 * NMIs.
4929 * NMIs take priority over external interrupts.
4930 */
4931 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4932 {
4933 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4934 {
4935 /* Finally, inject the NMI and we're done. */
4936 vmxHCSetPendingXcptNmi(pVCpu);
4937 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4938 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4939 return VINF_SUCCESS;
4940 }
4941 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4942 }
4943 else
4944 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4945
4946 /*
4947 * External interrupts (PIC/APIC).
4948 */
4949 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4950 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4951 {
4952 Assert(!DBGFIsStepping(pVCpu));
4953 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4954 AssertRC(rc);
4955
4956 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4957 {
4958 /*
4959 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4960 * We cannot re-request the interrupt from the controller again.
4961 */
4962 uint8_t u8Interrupt;
4963 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4964 if (RT_SUCCESS(rc))
4965 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4966 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4967 {
4968 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4969 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4970 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4971 /*
4972 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4973 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4974 * need to re-set this force-flag here.
4975 */
4976 }
4977 else
4978 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4979
4980 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4981 return VINF_SUCCESS;
4982 }
4983 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4984 }
4985 else
4986 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4987 }
4988 else
4989 {
4990 /*
4991 * An event is being injected or we are in an interrupt shadow.
4992 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4993 * soon as the guest is ready to accept it.
4994 */
4995 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4996 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4997 else
4998 {
4999 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5000 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5001 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5002 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5003 else
5004 {
5005 /* It's possible that interrupt-window exiting is still active, clear it as it's now unnecessary. */
5006 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
5007 }
5008 }
5009 }
5010
5011 return VINF_SUCCESS;
5012}
5013
5014
5015#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5016/**
5017 * Evaluates the event to be delivered to the nested-guest and sets it as the
5018 * pending event.
5019 *
5020 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
5021 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
5022 * NOT restore these force-flags.
5023 *
5024 * @returns Strict VBox status code (i.e. informational status codes too).
5025 * @param pVCpu The cross context virtual CPU structure.
5026 * @param pVmcsInfo The VMCS information structure.
5027 * @param pfIntrState Where to store the updated VMX guest-interruptibility
5028 * state.
5029 *
5030 * @remarks The guest must be in VMX non-root mode.
5031 */
5032static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
5033{
5034 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5035
5036 Assert(pfIntrState);
5037 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
5038 Assert(!TRPMHasTrap(pVCpu));
5039
5040 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
5041
5042 /*
5043 * If we are injecting an event, all necessary checks have been performed.
5044     * Any interrupt-window or NMI-window exiting would have been set up by the
5045 * nested-guest while we merged controls.
5046 */
5047 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5048 return VINF_SUCCESS;
5049
5050 /*
5051 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5052 * made pending (TRPM to HM event) and would be handled above if we resumed
5053 * execution in HM. If somehow we fell back to emulation after the
5054 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5055 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5056 * intercepts should be active and any events pending here have been generated
5057 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5058 */
5059 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5060
5061 /*
5062 * Interrupt shadows MAY block NMIs.
5063     * They also block external interrupts and MAY block external-interrupt VM-exits.
5064 *
5065 * See Intel spec. 24.4.2 "Guest Non-Register State".
5066 * See Intel spec. 25.4.1 "Event Blocking".
5067 */
5068 if (!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
5069 { /* likely */ }
5070 else
5071 return VINF_SUCCESS;
5072
5073 /** @todo SMI. SMIs take priority over NMIs. */
5074
5075 /*
5076 * NMIs.
5077 * NMIs take priority over interrupts.
5078 */
5079 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5080 {
5081 /*
5082 * Nested-guest NMI-window exiting.
5083 * The NMI-window exit must happen regardless of whether an NMI is pending
5084 * provided virtual-NMI blocking is not in effect.
5085 *
5086 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5087 */
5088 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5089 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5090 {
5091 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5092 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5093 }
5094
5095 /*
5096 * For a nested-guest, the FF always indicates the outer guest's ability to
5097 * receive an NMI while the guest-interruptibility state bit depends on whether
5098 * the nested-hypervisor is using virtual-NMIs.
5099 *
5100 * It is very important that we also clear the force-flag if we are causing
5101 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5102 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5103 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5104 */
5105 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5106 {
5107 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5108 return IEMExecVmxVmexitXcptNmi(pVCpu);
5109 vmxHCSetPendingXcptNmi(pVCpu);
5110 return VINF_SUCCESS;
5111 }
5112 }
5113
5114 /*
5115 * Nested-guest interrupt-window exiting.
5116 *
5117 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5118 * provided virtual interrupts are enabled.
5119 *
5120 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5121 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5122 */
5123 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5124 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5125 {
5126 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5127 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5128 }
5129
5130 /*
5131 * External interrupts (PIC/APIC).
5132 *
5133 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5134 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5135 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5136 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5137 *
5138 * NMIs block external interrupts as they are dispatched through the interrupt gate (vector 2)
5139 * which automatically clears EFLAGS.IF. Also it's possible an NMI handler could enable interrupts
5140 * and thus we should not check for NMI inhibition here.
5141 *
5142 * See Intel spec. 25.4.1 "Event Blocking".
5143 * See Intel spec. 6.8.1 "Masking Maskable Hardware Interrupts".
5144 */
5145 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5146 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5147 {
5148 Assert(!DBGFIsStepping(pVCpu));
5149 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
5150 AssertRC(rc);
5151 if (CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5152 {
5153 /* Nested-guest external interrupt VM-exit. */
5154 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5155 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5156 {
5157 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5158 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5159 return rcStrict;
5160 }
5161
5162 /*
5163 * Fetch the external interrupt from the interrupt controller.
5164 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5165 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5166 */
5167 uint8_t u8Interrupt;
5168 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5169 if (RT_SUCCESS(rc))
5170 {
5171 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5172 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5173 {
5174 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5175 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5176 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5177 return rcStrict;
5178 }
5179 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5180 return VINF_SUCCESS;
5181 }
5182 }
5183 }
5184 return VINF_SUCCESS;
5185}
5186#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5187
5188
5189/**
5190 * Injects any pending events into the guest if the guest is in a state to
5191 * receive them.
5192 *
5193 * @returns Strict VBox status code (i.e. informational status codes too).
5194 * @param pVCpu The cross context virtual CPU structure.
5195 * @param pVmcsInfo The VMCS information structure.
5196 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5197 * @param fIntrState The VT-x guest-interruptibility state.
5198 * @param fStepping Whether we are single-stepping the guest using the
5199 * hypervisor debugger and should return
5200 * VINF_EM_DBG_STEPPED if the event was dispatched
5201 * directly.
5202 */
5203static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5204 uint32_t fIntrState, bool fStepping)
5205{
5206 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5207#ifndef IN_NEM_DARWIN
5208 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5209#endif
5210
5211#ifdef VBOX_STRICT
5212 /*
5213 * Verify guest-interruptibility state.
5214 *
5215 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5216 * since injecting an event may modify the interruptibility state and we must thus always
5217 * use fIntrState.
5218 */
5219 {
5220 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5221 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5222 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5223 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5224 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5225 Assert(!TRPMHasTrap(pVCpu));
5226 NOREF(fBlockMovSS); NOREF(fBlockSti);
5227 }
5228#endif
5229
5230 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5231 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5232 {
5233 /*
5234 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5235 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5236 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5237 *
5238 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5239 */
5240 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5241#ifdef VBOX_STRICT
5242 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5243 {
5244 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5245 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5246 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5247 }
5248 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5249 {
5250 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5251 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5252 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5253 }
5254#endif
5255 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5256 uIntType));
5257
5258 /*
5259 * Inject the event and get any changes to the guest-interruptibility state.
5260 *
5261 * The guest-interruptibility state may need to be updated if we inject the event
5262 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5263 */
5264 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5265 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5266
5267 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5268 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5269 else
5270 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5271 }
5272
5273 /*
5274 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5275 * is an interrupt shadow (block-by-STI or block-by-MOV SS).
5276 */
5277 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5278 && !fIsNestedGuest)
5279 {
5280 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5281
5282 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5283 {
5284 /*
5285 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5286 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5287 */
5288 Assert(!DBGFIsStepping(pVCpu));
5289 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5290 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5291 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5292 AssertRC(rc);
5293 }
5294 else
5295 {
5296 /*
5297 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5298 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5299 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5300 * we use MTF, so just make sure it's called before executing guest-code.
5301 */
5302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5303 }
5304 }
5305    /* else: for nested-guests this is currently handled while merging controls. */
5306
5307 /*
5308 * Finally, update the guest-interruptibility state.
5309 *
5310 * This is required for the real-on-v86 software interrupt injection, for
5311 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5312 */
5313 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5314 AssertRC(rc);
5315
5316 /*
5317 * There's no need to clear the VM-entry interruption-information field here if we're not
5318 * injecting anything. VT-x clears the valid bit on every VM-exit.
5319 *
5320 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5321 */
5322
5323 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5324 return rcStrict;
5325}
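
/*
 * Illustrative sketch (an assumed non-nested caller in the pre-run path, kept out of
 * the build with #if 0): the interruptibility state produced by
 * vmxHCEvaluatePendingEvent() is what gets handed on to vmxHCInjectPendingEvent().
 */
#if 0
    uint32_t     fIntrState = 0;
    VBOXSTRICTRC rcStrict   = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, &fIntrState);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, false /* fIsNestedGuest */, fIntrState, false /* fStepping */);
#endif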
5326
5327
5328/**
5329 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5330 * and update error record fields accordingly.
5331 *
5332 * @returns VMX_IGS_* error codes.
5333 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5334 * wrong with the guest state.
5335 *
5336 * @param pVCpu The cross context virtual CPU structure.
5337 * @param pVmcsInfo The VMCS info. object.
5338 *
5339 * @remarks This function assumes our cached copies of the VMCS controls
5340 *          are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5341 */
5342static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5343{
5344#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5345#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
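/* A failed HMVMX_CHECK_BREAK records the VMX_IGS_* code in uError and breaks out of the
   do { ... } while (0) loop below; the trailing 'else do { } while (0)' merely swallows
   the semicolon at the call site so the macro behaves as a single statement in if/else
   chains. HMVMX_ERROR_BREAK does the same unconditionally. */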
5346
5347 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5348 uint32_t uError = VMX_IGS_ERROR;
5349 uint32_t u32IntrState = 0;
5350#ifndef IN_NEM_DARWIN
5351 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5352 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5353#else
5354 bool const fUnrestrictedGuest = true;
5355#endif
5356 do
5357 {
5358 int rc;
5359
5360 /*
5361 * Guest-interruptibility state.
5362 *
5363 * Read this first so that any check that fails prior to those that actually
5364 * require the guest-interruptibility state would still reflect the correct
5365         * VMCS value and avoid causing further confusion.
5366 */
5367 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5368 AssertRC(rc);
5369
5370 uint32_t u32Val;
5371 uint64_t u64Val;
5372
5373 /*
5374 * CR0.
5375 */
5376 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5377 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5378 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5379 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5380 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5381 if (fUnrestrictedGuest)
5382 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5383
5384 uint64_t u64GuestCr0;
5385 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5386 AssertRC(rc);
5387 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5388 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5389 if ( !fUnrestrictedGuest
5390 && (u64GuestCr0 & X86_CR0_PG)
5391 && !(u64GuestCr0 & X86_CR0_PE))
5392 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5393
5394 /*
5395 * CR4.
5396 */
5397 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5398 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5399 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5400
5401 uint64_t u64GuestCr4;
5402 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5403 AssertRC(rc);
5404 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5405 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5406
5407 /*
5408 * IA32_DEBUGCTL MSR.
5409 */
5410 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5411 AssertRC(rc);
5412 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5413 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5414 {
5415 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5416 }
5417 uint64_t u64DebugCtlMsr = u64Val;
5418
5419#ifdef VBOX_STRICT
5420 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5421 AssertRC(rc);
5422 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5423#endif
5424 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5425
5426 /*
5427 * RIP and RFLAGS.
5428 */
5429 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5430 AssertRC(rc);
5431 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5432 if ( !fLongModeGuest
5433 || !pCtx->cs.Attr.n.u1Long)
5434 {
5435 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5436 }
5437 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5438 * must be identical if the "IA-32e mode guest" VM-entry
5439 * control is 1 and CS.L is 1. No check applies if the
5440 * CPU supports 64 linear-address bits. */
5441
5442 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5443 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5444 AssertRC(rc);
5445 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5446 VMX_IGS_RFLAGS_RESERVED);
5447 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5448 uint32_t const u32Eflags = u64Val;
5449
5450 if ( fLongModeGuest
5451 || ( fUnrestrictedGuest
5452 && !(u64GuestCr0 & X86_CR0_PE)))
5453 {
5454 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5455 }
5456
5457 uint32_t u32EntryInfo;
5458 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5459 AssertRC(rc);
5460 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5461 {
5462 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5463 }
5464
5465 /*
5466 * 64-bit checks.
5467 */
5468 if (fLongModeGuest)
5469 {
5470 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5471 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5472 }
5473
5474 if ( !fLongModeGuest
5475 && (u64GuestCr4 & X86_CR4_PCIDE))
5476 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5477
5478 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5479 * 51:32 beyond the processor's physical-address width are 0. */
5480
5481 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5482 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5483 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5484
5485#ifndef IN_NEM_DARWIN
5486 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5487 AssertRC(rc);
5488 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5489
5490 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5491 AssertRC(rc);
5492 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5493#endif
5494
5495 /*
5496 * PERF_GLOBAL MSR.
5497 */
5498 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5499 {
5500 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5501 AssertRC(rc);
5502 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5503 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5504 }
5505
5506 /*
5507 * PAT MSR.
5508 */
5509 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5510 {
5511 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5512 AssertRC(rc);
5513 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5514 for (unsigned i = 0; i < 8; i++)
5515 {
5516 uint8_t u8Val = (u64Val & 0xff);
5517 if ( u8Val > MSR_IA32_PAT_MT_UCD
5518 || u8Val == MSR_IA32_PAT_MT_RSVD_2
5519 || u8Val == MSR_IA32_PAT_MT_RSVD_3)
5520 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5521 u64Val >>= 8;
5522 }
5523 }
5524
5525 /*
5526 * EFER MSR.
5527 */
5528 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5529 {
5530 Assert(g_fHmVmxSupportsVmcsEfer);
5531 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5532 AssertRC(rc);
5533 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5534 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5535 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5536 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5537 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5538 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5539 * iemVmxVmentryCheckGuestState(). */
5540 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5541 || !(u64GuestCr0 & X86_CR0_PG)
5542 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5543 VMX_IGS_EFER_LMA_LME_MISMATCH);
5544 }
5545
5546 /*
5547 * Segment registers.
5548 */
5549 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5550 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5551 if (!(u32Eflags & X86_EFL_VM))
5552 {
5553 /* CS */
5554 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5555 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5556 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5557 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5558 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5559 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5560 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5561 /* CS cannot be loaded with NULL in protected mode. */
5562 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5563 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5564 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5565 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5566 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5567 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5568 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5569 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5570 else
5571 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5572
5573 /* SS */
5574 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5575 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5576 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5577 if ( !(pCtx->cr0 & X86_CR0_PE)
5578 || pCtx->cs.Attr.n.u4Type == 3)
5579 {
5580 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5581 }
5582
5583 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5584 {
5585 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5586 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5587 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5588 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5589 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5590 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5591 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5592 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5593 }
5594
5595 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5596 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5597 {
5598 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5599 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5600 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5601 || pCtx->ds.Attr.n.u4Type > 11
5602 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5603 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5604 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5605 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5606 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5607 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5608 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5609 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5610 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5611 }
5612 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5613 {
5614 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5615 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5616 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5617 || pCtx->es.Attr.n.u4Type > 11
5618 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5619 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5620 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5621 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5622 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5623 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5624 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5625 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5626 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5627 }
5628 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5629 {
5630 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5631 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5632 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5633 || pCtx->fs.Attr.n.u4Type > 11
5634 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5635 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5636 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5637 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5638 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5639 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5640 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5641 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5642 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5643 }
5644 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5645 {
5646 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5647 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5648 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5649 || pCtx->gs.Attr.n.u4Type > 11
5650 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5651 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5652 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5653 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5654 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5655 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5656 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5657 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5658 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5659 }
5660 /* 64-bit capable CPUs. */
5661 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5662 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5663 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5664 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5665 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5666 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5667 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5668 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5669 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5670 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5671 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5672 }
5673 else
5674 {
5675 /* V86 mode checks. */
5676 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5677 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5678 {
5679 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5680 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5681 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5682 }
5683 else
5684 {
5685 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5686 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5687 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5688 }
5689
5690 /* CS */
5691 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5692 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5693 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5694 /* SS */
5695 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5696 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5697 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5698 /* DS */
5699 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5700 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5701 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5702 /* ES */
5703 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5704 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5705 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5706 /* FS */
5707 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5708 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5709 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5710 /* GS */
5711 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5712 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5713 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5714 /* 64-bit capable CPUs. */
5715 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5716 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5717 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5718 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5719 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5720 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5721 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5722 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5723 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5724 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5725 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5726 }
5727
5728 /*
5729 * TR.
5730 */
5731 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5732 /* 64-bit capable CPUs. */
5733 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5734 if (fLongModeGuest)
5735 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5736 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5737 else
5738 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5739 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5740 VMX_IGS_TR_ATTR_TYPE_INVALID);
5741 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5742 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5743 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5744 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5745 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5746 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5747 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5748 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5749
5750 /*
5751 * GDTR and IDTR (64-bit capable checks).
5752 */
5753 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5756
5757 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5758 AssertRC(rc);
5759 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5760
5761 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5762 AssertRC(rc);
5763 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5764
5765 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5766 AssertRC(rc);
5767 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5768
5769 /*
5770 * Guest Non-Register State.
5771 */
5772 /* Activity State. */
5773 uint32_t u32ActivityState;
5774 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5775 AssertRC(rc);
5776 HMVMX_CHECK_BREAK( !u32ActivityState
5777 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5778 VMX_IGS_ACTIVITY_STATE_INVALID);
5779 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5780 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5781
5782 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5783 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5784 {
5785 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5786 }
5787
5788        /** @todo Activity state and injecting interrupts. Left as a todo since we
5789         *        currently don't use any activity state other than ACTIVE. */
5790
5791 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5792 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5793
5794 /* Guest interruptibility-state. */
5795 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5796 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5797 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5798 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5799 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5800 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5801 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5802 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5803 {
5804 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5805 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5806 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5807 }
5808 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5809 {
5810 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5811 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5812 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5813 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5814 }
5815 /** @todo Assumes the processor is not in SMM. */
5816 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5817 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5818 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5819 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5820 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5821 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5822 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5823 {
5824 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5825 }
5826
5827 /* Pending debug exceptions. */
5828 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5829 AssertRC(rc);
5830 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5831 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5832        u32Val = u64Val;    /* For the pending debug exception checks below. */
5833
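        /* Consistency rule being checked (paraphrased): when interrupts are blocked by STI/MOV SS or the
           guest is halted, the pending-debug BS bit must be set if single-stepping is armed (EFLAGS.TF=1
           and IA32_DEBUGCTL.BTF=0) and must be clear otherwise. */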
5834 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5835 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5836 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5837 {
5838 if ( (u32Eflags & X86_EFL_TF)
5839 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5840 {
5841 /* Bit 14 is PendingDebug.BS. */
5842 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5843 }
5844 if ( !(u32Eflags & X86_EFL_TF)
5845 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5846 {
5847 /* Bit 14 is PendingDebug.BS. */
5848 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5849 }
5850 }
5851
5852#ifndef IN_NEM_DARWIN
5853 /* VMCS link pointer. */
5854 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5855 AssertRC(rc);
5856 if (u64Val != UINT64_C(0xffffffffffffffff))
5857 {
5858 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5859 /** @todo Bits beyond the processor's physical-address width MBZ. */
5860 /** @todo SMM checks. */
5861 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5862 Assert(pVmcsInfo->pvShadowVmcs);
5863 VMXVMCSREVID VmcsRevId;
5864 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5865 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5866 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5867 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5868 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5869 }
5870
5871 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5872 * not using nested paging? */
5873 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5874 && !fLongModeGuest
5875 && CPUMIsGuestInPAEModeEx(pCtx))
5876 {
5877 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5878 AssertRC(rc);
5879 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5880
5881 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5882 AssertRC(rc);
5883 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5884
5885 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5886 AssertRC(rc);
5887 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5888
5889 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5890 AssertRC(rc);
5891 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5892 }
5893#endif
5894
5895 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5896 if (uError == VMX_IGS_ERROR)
5897 uError = VMX_IGS_REASON_NOT_FOUND;
5898 } while (0);
5899
5900 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5901 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5902 return uError;
5903
5904#undef HMVMX_ERROR_BREAK
5905#undef HMVMX_CHECK_BREAK
5906}
5907
5908
5909#ifndef HMVMX_USE_FUNCTION_TABLE
5910/**
5911 * Handles a guest VM-exit from hardware-assisted VMX execution.
5912 *
5913 * @returns Strict VBox status code (i.e. informational status codes too).
5914 * @param pVCpu The cross context virtual CPU structure.
5915 * @param pVmxTransient The VMX-transient structure.
5916 */
5917DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5918{
5919#ifdef DEBUG_ramshankar
5920# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5921 do { \
5922 if (a_fSave != 0) \
5923 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5924 VBOXSTRICTRC rcStrict = a_CallExpr; \
5925 if (a_fSave != 0) \
5926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5927 return rcStrict; \
5928 } while (0)
5929#else
5930# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5931#endif
5932 uint32_t const uExitReason = pVmxTransient->uExitReason;
5933 switch (uExitReason)
5934 {
5935 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5936 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5937 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5938 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5939 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5940 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5941 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5942 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5943 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5944 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5945 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5946 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5947 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5948 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5949 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5950 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5951 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5952 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5953 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5954 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5955 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5956 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5957 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5958 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5959 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5960 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5961 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5962 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5963 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5964 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5965#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5966 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5967 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5968 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5969 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5970 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5971        case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5972        case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5973 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5974 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5975 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5976#else
5977 case VMX_EXIT_VMCLEAR:
5978 case VMX_EXIT_VMLAUNCH:
5979 case VMX_EXIT_VMPTRLD:
5980 case VMX_EXIT_VMPTRST:
5981 case VMX_EXIT_VMREAD:
5982 case VMX_EXIT_VMRESUME:
5983 case VMX_EXIT_VMWRITE:
5984 case VMX_EXIT_VMXOFF:
5985 case VMX_EXIT_VMXON:
5986 case VMX_EXIT_INVVPID:
5987 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5988#endif
5989#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5990 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5991#else
5992 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5993#endif
5994
5995 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5996 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5997 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5998
5999 case VMX_EXIT_INIT_SIGNAL:
6000 case VMX_EXIT_SIPI:
6001 case VMX_EXIT_IO_SMI:
6002 case VMX_EXIT_SMI:
6003 case VMX_EXIT_ERR_MSR_LOAD:
6004 case VMX_EXIT_ERR_MACHINE_CHECK:
6005 case VMX_EXIT_PML_FULL:
6006 case VMX_EXIT_VIRTUALIZED_EOI:
6007 case VMX_EXIT_GDTR_IDTR_ACCESS:
6008 case VMX_EXIT_LDTR_TR_ACCESS:
6009 case VMX_EXIT_APIC_WRITE:
6010 case VMX_EXIT_RDRAND:
6011 case VMX_EXIT_RSM:
6012 case VMX_EXIT_VMFUNC:
6013 case VMX_EXIT_ENCLS:
6014 case VMX_EXIT_RDSEED:
6015 case VMX_EXIT_XSAVES:
6016 case VMX_EXIT_XRSTORS:
6017 case VMX_EXIT_UMWAIT:
6018 case VMX_EXIT_TPAUSE:
6019 case VMX_EXIT_LOADIWKEY:
6020 default:
6021 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6022 }
6023#undef VMEXIT_CALL_RET
6024}
6025#endif /* !HMVMX_USE_FUNCTION_TABLE */
6026
6027
6028#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6029/**
6030 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
6031 *
6032 * @returns Strict VBox status code (i.e. informational status codes too).
6033 * @param pVCpu The cross context virtual CPU structure.
6034 * @param pVmxTransient The VMX-transient structure.
6035 */
6036DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6037{
6038#ifdef DEBUG_ramshankar
6039# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
6040 do { \
6041 if (a_fSave != 0) \
6042 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
6043 VBOXSTRICTRC rcStrict = a_CallExpr; \
6044 return rcStrict; \
6045 } while (0)
6046#else
6047# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6048#endif
6049
6050 uint32_t const uExitReason = pVmxTransient->uExitReason;
6051 switch (uExitReason)
6052 {
6053# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6054 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6055 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6056# else
6057 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6058 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6059# endif
6060 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6061 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6062 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6063
6064 /*
6065 * We shouldn't direct host physical interrupts to the nested-guest.
6066 */
6067 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6068
6069 /*
6070         * Instructions that cause VM-exits unconditionally or whose VM-exit condition is
6071         * determined solely by the nested hypervisor (meaning if the VM-exit
6072         * happens, it's guaranteed to be a nested-guest VM-exit).
6073 *
6074 * - Provides VM-exit instruction length ONLY.
6075 */
6076 case VMX_EXIT_CPUID: /* Unconditional. */
6077 case VMX_EXIT_VMCALL:
6078 case VMX_EXIT_GETSEC:
6079 case VMX_EXIT_INVD:
6080 case VMX_EXIT_XSETBV:
6081 case VMX_EXIT_VMLAUNCH:
6082 case VMX_EXIT_VMRESUME:
6083 case VMX_EXIT_VMXOFF:
6084 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6085 case VMX_EXIT_VMFUNC:
6086 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6087
6088 /*
6089         * Instructions that cause VM-exits unconditionally or whose VM-exit condition is
6090         * determined solely by the nested hypervisor (meaning if the VM-exit
6091         * happens, it's guaranteed to be a nested-guest VM-exit).
6092 *
6093 * - Provides VM-exit instruction length.
6094 * - Provides VM-exit information.
6095 * - Optionally provides Exit qualification.
6096 *
6097 * Since Exit qualification is 0 for all VM-exits where it is not
6098 * applicable, reading and passing it to the guest should produce
6099 * defined behavior.
6100 *
6101 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6102 */
6103 case VMX_EXIT_INVEPT: /* Unconditional. */
6104 case VMX_EXIT_INVVPID:
6105 case VMX_EXIT_VMCLEAR:
6106 case VMX_EXIT_VMPTRLD:
6107 case VMX_EXIT_VMPTRST:
6108 case VMX_EXIT_VMXON:
6109 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6110 case VMX_EXIT_LDTR_TR_ACCESS:
6111 case VMX_EXIT_RDRAND:
6112 case VMX_EXIT_RDSEED:
6113 case VMX_EXIT_XSAVES:
6114 case VMX_EXIT_XRSTORS:
6115 case VMX_EXIT_UMWAIT:
6116 case VMX_EXIT_TPAUSE:
6117 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6118
6119 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6120 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6121 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6122 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6123 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6124 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6125 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6126 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6127 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6128 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6129 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6130 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6131 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6132 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6133 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6134 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6135 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6136 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6137 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6138
6139 case VMX_EXIT_PREEMPT_TIMER:
6140 {
6141 /** @todo NSTVMX: Preempt timer. */
6142 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6143 }
6144
6145 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6146 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6147
6148 case VMX_EXIT_VMREAD:
6149 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6150
6151 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6152 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6153
6154 case VMX_EXIT_INIT_SIGNAL:
6155 case VMX_EXIT_SIPI:
6156 case VMX_EXIT_IO_SMI:
6157 case VMX_EXIT_SMI:
6158 case VMX_EXIT_ERR_MSR_LOAD:
6159 case VMX_EXIT_ERR_MACHINE_CHECK:
6160 case VMX_EXIT_PML_FULL:
6161 case VMX_EXIT_RSM:
6162 default:
6163 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6164 }
6165#undef VMEXIT_CALL_RET
6166}
6167#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6168
6169
6170/** @name VM-exit helpers.
6171 * @{
6172 */
6173/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6174/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6175/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6176
6177/** Macro for VM-exits called unexpectedly. */
6178#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6179 do { \
6180 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6181 return VERR_VMX_UNEXPECTED_EXIT; \
6182 } while (0)
6183
6184#ifdef VBOX_STRICT
6185# ifndef IN_NEM_DARWIN
6186 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6187# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6188 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6189
6190# define HMVMX_ASSERT_PREEMPT_CPUID() \
6191 do { \
6192 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6193 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6194 } while (0)
6195
6196# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6197 do { \
6198 AssertPtr((a_pVCpu)); \
6199 AssertPtr((a_pVmxTransient)); \
6200 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6201 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6202 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6203 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6204 Assert((a_pVmxTransient)->pVmcsInfo); \
6205 Assert(ASMIntAreEnabled()); \
6206 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6207 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6208 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6209 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6210 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6211 HMVMX_ASSERT_PREEMPT_CPUID(); \
6212 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6213 } while (0)
6214# else
6215# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6216# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6217# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6218 do { \
6219 AssertPtr((a_pVCpu)); \
6220 AssertPtr((a_pVmxTransient)); \
6221 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6222 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6223 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6224 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6225 Assert((a_pVmxTransient)->pVmcsInfo); \
6226 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6227 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6228 } while (0)
6229# endif
6230
6231# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6232 do { \
6233 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6234 Assert((a_pVmxTransient)->fIsNestedGuest); \
6235 } while (0)
6236
6237# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6238 do { \
6239 Log4Func(("\n")); \
6240 } while (0)
6241#else
6242# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6243 do { \
6244 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6245 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6246 } while (0)
6247
6248# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6249 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6250
6251# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6252#endif
6253
6254#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6255/** Macro that does the necessary privilege checks and intercepted VM-exits for
6256 * guests that attempted to execute a VMX instruction. */
6257# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6258 do \
6259 { \
6260 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6261 if (rcStrictTmp == VINF_SUCCESS) \
6262 { /* likely */ } \
6263 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6264 { \
6265 Assert((a_pVCpu)->hm.s.Event.fPending); \
6266 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6267 return VINF_SUCCESS; \
6268 } \
6269 else \
6270 { \
6271 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6272 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6273 } \
6274 } while (0)
6275
6276 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6277# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6278 do \
6279 { \
6280 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6281 (a_pGCPtrEffAddr)); \
6282 if (rcStrictTmp == VINF_SUCCESS) \
6283 { /* likely */ } \
6284 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6285 { \
6286 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6287 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6288 NOREF(uXcptTmp); \
6289 return VINF_SUCCESS; \
6290 } \
6291 else \
6292 { \
6293 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6294 return rcStrictTmp; \
6295 } \
6296 } while (0)
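/* Rough usage sketch (illustrative only, not a handler from this file): a VMX-instruction exit
 * handler would typically do something like
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     RTGCPTR GCPtrOperand;
 *     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 *                              VMXMEMACCESS_READ, &GCPtrOperand);
 * before handing the decoded operand to IEM for emulation. */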
6297#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6298
6299
6300/**
6301 * Advances the guest RIP by the specified number of bytes.
6302 *
6303 * @param pVCpu The cross context virtual CPU structure.
6304 * @param cbInstr Number of bytes to advance the RIP by.
6305 *
6306 * @remarks No-long-jump zone!!!
6307 */
6308DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6309{
6310 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6311
6312 /*
6313 * Advance RIP.
6314 *
6315 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6316 * when the addition causes a "carry" into the upper half and check whether
6317     * we're in 64-bit mode and can go on with it or whether we should zap the top
6318 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6319 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6320 *
6321 * See PC wrap around tests in bs3-cpu-weird-1.
6322 */
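    /* For example (illustration): with uRipPrev=0x00000000fffffffe and cbInstr=4, uRipNext becomes
       0x0000000100000002 and bit 32 differs between the two values, so outside 64-bit mode the
       result is truncated back to 32 bits below. */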
6323 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6324 uint64_t const uRipNext = uRipPrev + cbInstr;
6325 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6326 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6327 pVCpu->cpum.GstCtx.rip = uRipNext;
6328 else
6329 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6330
6331 /*
6332 * Clear RF and interrupt shadowing.
6333 */
6334 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6335 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6336 else
6337 {
6338 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6339 {
6340 /** @todo \#DB - single step. */
6341 }
6342 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6343 }
6344 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6345
6346 /* Mark both RIP and RFLAGS as updated. */
6347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6348}
6349
6350
6351/**
6352 * Advances the guest RIP after reading it from the VMCS.
6353 *
6354 * @returns VBox status code, no informational status codes.
6355 * @param pVCpu The cross context virtual CPU structure.
6356 * @param pVmxTransient The VMX-transient structure.
6357 *
6358 * @remarks No-long-jump zone!!!
6359 */
6360static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6361{
6362 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6363 /** @todo consider template here after checking callers. */
6364 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6365 AssertRCReturn(rc, rc);
6366
6367 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6368 return VINF_SUCCESS;
6369}
6370
6371
6372/**
6373 * Handle a condition that occurred while delivering an event through the guest or
6374 * nested-guest IDT.
6375 *
6376 * @returns Strict VBox status code (i.e. informational status codes too).
6377 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6378 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6379 *          to continue execution of the guest, which will deliver the \#DF.
6380 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6381 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6382 *
6383 * @param pVCpu The cross context virtual CPU structure.
6384 * @param pVmxTransient The VMX-transient structure.
6385 *
6386 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6387 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6388 * is due to an EPT violation, PML full or SPP-related event.
6389 *
6390 * @remarks No-long-jump zone!!!
6391 */
6392static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6393{
6394 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6395 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6396 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6397 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6398 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6399 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6400
6401 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6402 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6403 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6404 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6405 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6406 {
6407 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6408 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6409
6410 /*
6411 * If the event was a software interrupt (generated with INT n) or a software exception
6412 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6413 * can handle the VM-exit and continue guest execution which will re-execute the
6414 * instruction rather than re-injecting the exception, as that can cause premature
6415 * trips to ring-3 before injection and involve TRPM which currently has no way of
6416 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6417 * the problem).
6418 */
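        /* Example (illustrative): if delivering "INT 0x80" through the IDT faulted with a #PF that we
           handle here (e.g. a shadow page-table miss), the guest re-executes the INT instruction rather
           than having the software-interrupt event re-injected. */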
6419 IEMXCPTRAISE enmRaise;
6420 IEMXCPTRAISEINFO fRaiseInfo;
6421 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6422 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6423 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6424 {
6425 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6426 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6427 }
6428 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6429 {
6430 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6431 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6432 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6433
6434 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6435 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6436
6437 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6438
6439 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6440 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6441 {
6442 pVmxTransient->fVectoringPF = true;
6443 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6444 }
6445 }
6446 else
6447 {
6448 /*
6449 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6450 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6451 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6452 */
6453 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6454 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6455 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6456 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6457 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6458 }
6459
6460 /*
6461 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6462 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6463 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6464 * subsequent VM-entry would fail, see @bugref{7445}.
6465 *
6466 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6467 */
6468 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6469 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6470 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6471 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6472 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6473
6474 switch (enmRaise)
6475 {
6476 case IEMXCPTRAISE_CURRENT_XCPT:
6477 {
6478 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6479 Assert(rcStrict == VINF_SUCCESS);
6480 break;
6481 }
6482
6483 case IEMXCPTRAISE_PREV_EVENT:
6484 {
6485 uint32_t u32ErrCode;
6486 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6487 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6488 else
6489 u32ErrCode = 0;
6490
6491 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6492 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6493 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6494 pVCpu->cpum.GstCtx.cr2);
6495
6496 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6497 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6498 Assert(rcStrict == VINF_SUCCESS);
6499 break;
6500 }
6501
6502 case IEMXCPTRAISE_REEXEC_INSTR:
6503 Assert(rcStrict == VINF_SUCCESS);
6504 break;
6505
6506 case IEMXCPTRAISE_DOUBLE_FAULT:
6507 {
6508 /*
6509 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6510 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6511 */
6512 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6513 {
6514 pVmxTransient->fVectoringDoublePF = true;
6515 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6516 pVCpu->cpum.GstCtx.cr2));
6517 rcStrict = VINF_SUCCESS;
6518 }
6519 else
6520 {
6521 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6522 vmxHCSetPendingXcptDF(pVCpu);
6523 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6524 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6525 rcStrict = VINF_HM_DOUBLE_FAULT;
6526 }
6527 break;
6528 }
6529
6530 case IEMXCPTRAISE_TRIPLE_FAULT:
6531 {
6532 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6533 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6534 rcStrict = VINF_EM_RESET;
6535 break;
6536 }
6537
6538 case IEMXCPTRAISE_CPU_HANG:
6539 {
6540 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6541 rcStrict = VERR_EM_GUEST_CPU_HANG;
6542 break;
6543 }
6544
6545 default:
6546 {
6547 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6548 rcStrict = VERR_VMX_IPE_2;
6549 break;
6550 }
6551 }
6552 }
6553 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6554 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6555 {
6556 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6557 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6558 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6559 {
6560 /*
6561             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6562 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6563 * that virtual NMIs remain blocked until the IRET execution is completed.
6564 *
6565 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6566 */
6567 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6568 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6569 }
6570 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6571 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6572 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6573 {
6574 /*
6575 * Execution of IRET caused an EPT violation, page-modification log-full event or
6576 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6577 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6578 * that virtual NMIs remain blocked until the IRET execution is completed.
6579 *
6580 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6581 */
6582 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6583 {
6584 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6585 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6586 }
6587 }
6588 }
6589
6590 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6591 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6592 return rcStrict;
6593}
6594
6595
6596#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6597/**
6598 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6599 * guest attempting to execute a VMX instruction.
6600 *
6601 * @returns Strict VBox status code (i.e. informational status codes too).
6602 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6603 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure.
6606 * @param uExitReason The VM-exit reason.
6607 *
6608 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6609 * @remarks No-long-jump zone!!!
6610 */
6611static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6612{
6613 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6614 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6615
6616 /*
6617 * The physical CPU would have already checked the CPU mode/code segment.
6618 * We shall just assert here for paranoia.
6619 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6620 */
6621 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6622 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6623 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6624
6625 if (uExitReason == VMX_EXIT_VMXON)
6626 {
6627 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6628
6629 /*
6630 * We check CR4.VMXE because it is required to be always set while in VMX operation
6631 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6632 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6633 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6634 */
6635 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6636 {
6637 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6638 vmxHCSetPendingXcptUD(pVCpu);
6639 return VINF_HM_PENDING_XCPT;
6640 }
6641 }
6642 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6643 {
6644 /*
6645 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6646 * (other than VMXON), we need to raise a #UD.
6647 */
6648 Log4Func(("Not in VMX root mode -> #UD\n"));
6649 vmxHCSetPendingXcptUD(pVCpu);
6650 return VINF_HM_PENDING_XCPT;
6651 }
6652
6653 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6654 return VINF_SUCCESS;
6655}
6656
6657
6658/**
6659 * Decodes the memory operand of an instruction that caused a VM-exit.
6660 *
6661 * The Exit qualification field provides the displacement field for memory
6662 * operand instructions, if any.
6663 *
6664 * @returns Strict VBox status code (i.e. informational status codes too).
6665 * @retval VINF_SUCCESS if the operand was successfully decoded.
6666 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6667 * operand.
6668 * @param pVCpu The cross context virtual CPU structure.
6669 * @param uExitInstrInfo The VM-exit instruction information field.
6670 * @param enmMemAccess The memory operand's access type (read or write).
6671 * @param GCPtrDisp The instruction displacement field, if any. For
6672 * RIP-relative addressing pass RIP + displacement here.
6673 * @param pGCPtrMem Where to store the effective destination memory address.
6674 *
6675 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6676 *          virtual-8086 mode, hence skips those checks while verifying whether the
6677 * segment is valid.
6678 */
6679static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6680 PRTGCPTR pGCPtrMem)
6681{
6682 Assert(pGCPtrMem);
6683 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6684 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6685 | CPUMCTX_EXTRN_CR0);
6686
6687 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6688 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6689 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
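    /* The u3AddrSize field of the instruction-information encodes 0 = 16-bit, 1 = 32-bit and
       2 = 64-bit address size; it is used to index both tables above. */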
6690
6691 VMXEXITINSTRINFO ExitInstrInfo;
6692 ExitInstrInfo.u = uExitInstrInfo;
6693 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6694 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6695 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6696 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6697 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6698 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6699 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6700 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6701 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6702
6703 /*
6704 * Validate instruction information.
6705     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6706 */
6707 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6708 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6709 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6710 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6711 AssertLogRelMsgReturn(fIsMemOperand,
6712 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6713
6714 /*
6715 * Compute the complete effective address.
6716 *
6717 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6718 * See AMD spec. 4.5.2 "Segment Registers".
6719 */
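    /* Worked example (illustrative): with a valid base and index register the address is
       disp + base + (index << scale); the segment base is then added (always for FS/GS, and
       whenever not in long mode) and the result is truncated to the instruction's address size. */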
6720 RTGCPTR GCPtrMem = GCPtrDisp;
6721 if (fBaseRegValid)
6722 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6723 if (fIdxRegValid)
6724 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6725
6726 RTGCPTR const GCPtrOff = GCPtrMem;
6727 if ( !fIsLongMode
6728 || iSegReg >= X86_SREG_FS)
6729 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6730 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6731
6732 /*
6733 * Validate effective address.
6734 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6735 */
6736 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6737 Assert(cbAccess > 0);
6738 if (fIsLongMode)
6739 {
6740 if (X86_IS_CANONICAL(GCPtrMem))
6741 {
6742 *pGCPtrMem = GCPtrMem;
6743 return VINF_SUCCESS;
6744 }
6745
6746 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6747 * "Data Limit Checks in 64-bit Mode". */
6748 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6749 vmxHCSetPendingXcptGP(pVCpu, 0);
6750 return VINF_HM_PENDING_XCPT;
6751 }
6752
6753 /*
6754 * This is a watered down version of iemMemApplySegment().
6755 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6756 * and segment CPL/DPL checks are skipped.
6757 */
6758 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6759 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6760 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6761
6762 /* Check if the segment is present and usable. */
6763 if ( pSel->Attr.n.u1Present
6764 && !pSel->Attr.n.u1Unusable)
6765 {
6766 Assert(pSel->Attr.n.u1DescType);
6767 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6768 {
6769 /* Check permissions for the data segment. */
6770 if ( enmMemAccess == VMXMEMACCESS_WRITE
6771 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6772 {
6773 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6774 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6775 return VINF_HM_PENDING_XCPT;
6776 }
6777
6778 /* Check limits if it's a normal data segment. */
6779 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6780 {
6781 if ( GCPtrFirst32 > pSel->u32Limit
6782 || GCPtrLast32 > pSel->u32Limit)
6783 {
6784 Log4Func(("Data segment limit exceeded. "
6785 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6786 GCPtrLast32, pSel->u32Limit));
6787 if (iSegReg == X86_SREG_SS)
6788 vmxHCSetPendingXcptSS(pVCpu, 0);
6789 else
6790 vmxHCSetPendingXcptGP(pVCpu, 0);
6791 return VINF_HM_PENDING_XCPT;
6792 }
6793 }
6794 else
6795 {
6796 /* Check limits if it's an expand-down data segment.
6797 Note! The upper boundary is defined by the B bit, not the G bit! */
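                /* In other words, valid offsets lie in (u32Limit, 0xffff] when B=0 and in
                   (u32Limit, 0xffffffff] when B=1. */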
6798 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6799 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6800 {
6801 Log4Func(("Expand-down data segment limit exceeded. "
6802 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6803 GCPtrLast32, pSel->u32Limit));
6804 if (iSegReg == X86_SREG_SS)
6805 vmxHCSetPendingXcptSS(pVCpu, 0);
6806 else
6807 vmxHCSetPendingXcptGP(pVCpu, 0);
6808 return VINF_HM_PENDING_XCPT;
6809 }
6810 }
6811 }
6812 else
6813 {
6814 /* Check permissions for the code segment. */
6815 if ( enmMemAccess == VMXMEMACCESS_WRITE
6816 || ( enmMemAccess == VMXMEMACCESS_READ
6817 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6818 {
6819 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6820 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6821 vmxHCSetPendingXcptGP(pVCpu, 0);
6822 return VINF_HM_PENDING_XCPT;
6823 }
6824
6825 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6826 if ( GCPtrFirst32 > pSel->u32Limit
6827 || GCPtrLast32 > pSel->u32Limit)
6828 {
6829 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6830 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6831 if (iSegReg == X86_SREG_SS)
6832 vmxHCSetPendingXcptSS(pVCpu, 0);
6833 else
6834 vmxHCSetPendingXcptGP(pVCpu, 0);
6835 return VINF_HM_PENDING_XCPT;
6836 }
6837 }
6838 }
6839 else
6840 {
6841 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6842 vmxHCSetPendingXcptGP(pVCpu, 0);
6843 return VINF_HM_PENDING_XCPT;
6844 }
6845
6846 *pGCPtrMem = GCPtrMem;
6847 return VINF_SUCCESS;
6848}
6849#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6850
6851
6852/**
6853 * VM-exit helper for LMSW.
6854 */
6855static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6856{
6857 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6858 AssertRCReturn(rc, rc);
6859
6860 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6861 AssertMsg( rcStrict == VINF_SUCCESS
6862 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6863
6864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6865 if (rcStrict == VINF_IEM_RAISED_XCPT)
6866 {
6867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6868 rcStrict = VINF_SUCCESS;
6869 }
6870
6871 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6872 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6873 return rcStrict;
6874}
6875
6876
6877/**
6878 * VM-exit helper for CLTS.
6879 */
6880static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6881{
6882 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6883 AssertRCReturn(rc, rc);
6884
6885 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6886 AssertMsg( rcStrict == VINF_SUCCESS
6887 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6888
6889 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6890 if (rcStrict == VINF_IEM_RAISED_XCPT)
6891 {
6892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6893 rcStrict = VINF_SUCCESS;
6894 }
6895
6896 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6897 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6898 return rcStrict;
6899}
6900
6901
6902/**
6903 * VM-exit helper for MOV from CRx (CRx read).
6904 */
6905static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6906{
6907 Assert(iCrReg < 16);
6908 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6909
6910 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6911 AssertRCReturn(rc, rc);
6912
6913 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6914 AssertMsg( rcStrict == VINF_SUCCESS
6915 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6916
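    /* MOV from CRx writes a general-purpose register; RSP is kept in the VMCS, so it must
       additionally be flagged as changed when it is the destination. */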
6917 if (iGReg == X86_GREG_xSP)
6918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6919 else
6920 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6921#ifdef VBOX_WITH_STATISTICS
6922 switch (iCrReg)
6923 {
6924 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6925 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6926 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6927 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6928 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6929 }
6930#endif
6931 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6932 return rcStrict;
6933}
6934
6935
6936/**
6937 * VM-exit helper for MOV to CRx (CRx write).
6938 */
6939static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6940{
6941 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6942
6943 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6944 AssertMsg( rcStrict == VINF_SUCCESS
6945 || rcStrict == VINF_IEM_RAISED_XCPT
6946 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6947
6948 switch (iCrReg)
6949 {
6950 case 0:
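            /* CR0 writes can toggle PG/PE and thus interact with EFER.LMA and the VM-entry/VM-exit
               controls, which is presumably why EFER and those controls are flagged for re-export too. */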
6951 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6952 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6953 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6954 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6955 break;
6956
6957 case 2:
6958 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6959            /* Nothing to do here, CR2 is not part of the VMCS. */
6960 break;
6961
6962 case 3:
6963 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6964 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6965 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6966 break;
6967
6968 case 4:
6969 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6970 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6971#ifndef IN_NEM_DARWIN
6972 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6973 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6974#else
6975 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6976#endif
6977 break;
6978
6979 case 8:
6980 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6981 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6982 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6983 break;
6984
6985 default:
6986 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6987 break;
6988 }
6989
6990 if (rcStrict == VINF_IEM_RAISED_XCPT)
6991 {
6992 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6993 rcStrict = VINF_SUCCESS;
6994 }
6995 return rcStrict;
6996}
6997
6998
6999/**
7000 * VM-exit exception handler for \#PF (Page-fault exception).
7001 *
7002 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7003 */
7004static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7005{
7006 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7007 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7008
7009#ifndef IN_NEM_DARWIN
7010 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7011 if (!VM_IS_VMX_NESTED_PAGING(pVM))
7012 { /* likely */ }
7013 else
7014#endif
7015 {
7016#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
7017 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
7018#endif
7019 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
7020 if (!pVmxTransient->fVectoringDoublePF)
7021 {
7022 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7023 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
7024 }
7025 else
7026 {
7027 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7028 Assert(!pVmxTransient->fIsNestedGuest);
7029 vmxHCSetPendingXcptDF(pVCpu);
7030 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
7031 }
7032 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7033 return VINF_SUCCESS;
7034 }
7035
7036 Assert(!pVmxTransient->fIsNestedGuest);
7037
7038    /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
7039       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
7040 if (pVmxTransient->fVectoringPF)
7041 {
7042 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7043 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7044 }
7045
7046 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7047 AssertRCReturn(rc, rc);
7048
7049 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
7050 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
7051
7052 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
7053 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
7054
7055 Log4Func(("#PF: rc=%Rrc\n", rc));
7056 if (rc == VINF_SUCCESS)
7057 {
7058 /*
7059 * This is typically a shadow page table sync or an MMIO instruction. But we may have
7060 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
7061 */
7062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7063 TRPMResetTrap(pVCpu);
7064 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
7065 return rc;
7066 }
7067
7068 if (rc == VINF_EM_RAW_GUEST_TRAP)
7069 {
7070 if (!pVmxTransient->fVectoringDoublePF)
7071 {
7072 /* It's a guest page fault and needs to be reflected to the guest. */
7073 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
7074 TRPMResetTrap(pVCpu);
7075 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
7076 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7077 uGstErrorCode, pVmxTransient->uExitQual);
7078 }
7079 else
7080 {
7081 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7082 TRPMResetTrap(pVCpu);
7083 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
7084 vmxHCSetPendingXcptDF(pVCpu);
7085 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7086 }
7087
7088 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7089 return VINF_SUCCESS;
7090 }
7091
7092 TRPMResetTrap(pVCpu);
7093 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
7094 return rc;
7095}
7096
7097
7098/**
7099 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7100 *
7101 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7102 */
7103static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7104{
7105 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7106 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7107
7108 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7109 AssertRCReturn(rc, rc);
7110
7111 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7112 {
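        /* With CR0.NE clear the guest uses the legacy (external) x87 error-reporting mode: the
           error is signalled via FERR# and surfaces as IRQ 13 through the interrupt controller
           rather than as a #MF exception, hence the PDM IRQ assertion below. */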
7113 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7114 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7115
7116 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7117 * provides the VM-exit instruction length. If this causes problems later,
7118 * disassemble the instruction like it's done on AMD-V. */
7119 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7120 AssertRCReturn(rc2, rc2);
7121 return rc;
7122 }
7123
7124 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7125 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7126 return VINF_SUCCESS;
7127}
7128
7129
7130/**
7131 * VM-exit exception handler for \#BP (Breakpoint exception).
7132 *
7133 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7134 */
7135static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7136{
7137 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7138 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7139
7140 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7141 AssertRCReturn(rc, rc);
7142
7143 VBOXSTRICTRC rcStrict;
7144 if (!pVmxTransient->fIsNestedGuest)
7145 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7146 else
7147 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7148
7149 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7150 {
7151 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7152 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7153 rcStrict = VINF_SUCCESS;
7154 }
7155
7156 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7157 return rcStrict;
7158}
7159
7160
7161/**
7162 * VM-exit helper for split-lock access triggered \#AC exceptions.
7163 */
7164static VBOXSTRICTRC vmxHCHandleSplitLockAcXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7165{
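    /*
     * Note: a split-lock #AC is what the CPU raises, when the host has enabled split-lock
     * detection, for a locked read-modify-write access whose operand straddles a cache-line
     * boundary, e.g. a hypothetical "lock inc dword [rbx]" with rbx = 64*n - 2 so the 4-byte
     * operand spans two cache lines. The faulting guest instruction is emulated further down
     * instead of being re-executed on the host.
     */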
7166 /*
7167 * Check for debug/trace events and import state accordingly.
7168 */
7169 if (!pVmxTransient->fIsNestedGuest)
7170 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7171 else
7172 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatNestedExitACSplitLock);
7173 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7174 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7175#ifndef IN_NEM_DARWIN
7176 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7177#endif
7178 )
7179 {
7180 if (pVM->cCpus == 1)
7181 {
7182#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7183 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7184 HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7185#else
7186 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7187 HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7188#endif
7189 AssertRCReturn(rc, rc);
7190 }
7191 }
7192 else
7193 {
7194 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7195 HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7196 AssertRCReturn(rc, rc);
7197
7198 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7199
7200 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7201 {
7202 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7203 if (rcStrict != VINF_SUCCESS)
7204 return rcStrict;
7205 }
7206 }
7207
7208 /*
7209 * Emulate the instruction.
7210 *
7211 * We have to ignore the LOCK prefix here as we must not retrigger the
7212 * detection on the host. This isn't all that satisfactory, though...
7213 */
7214 if (pVM->cCpus == 1)
7215 {
7216 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7217 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7218
7219 /** @todo For SMP configs we should do a rendezvous here. */
7220 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7221 if (rcStrict == VINF_SUCCESS)
7222#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7223 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7224 HM_CHANGED_GUEST_RIP
7225 | HM_CHANGED_GUEST_RFLAGS
7226 | HM_CHANGED_GUEST_GPRS_MASK
7227 | HM_CHANGED_GUEST_CS
7228 | HM_CHANGED_GUEST_SS);
7229#else
7230 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7231#endif
7232 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7233 {
7234 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7235 rcStrict = VINF_SUCCESS;
7236 }
7237 return rcStrict;
7238 }
7239 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7240 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7241 return VINF_EM_EMULATE_SPLIT_LOCK;
7242}
7243
7244
7245/**
7246 * VM-exit exception handler for \#AC (Alignment-check exception).
7247 *
7248 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7249 */
7250static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7251{
7252 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7253
7254 /*
7255 * Detect #ACs caused by the host having enabled split-lock detection.
7256 * Emulate such instructions.
7257 */
7258 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7259 AssertRCReturn(rc, rc);
7260 /** @todo detect split lock in cpu feature? */
7261 /** @todo r=ramshankar: is cpu feature detection really necessary since we are able
7262 * to detect the split-lock \#AC condition without it? More so since the
7263 * feature isn't cleanly detectable, see @bugref{10318#c125}. */
7264 if (vmxHCIsSplitLockAcXcpt(pVCpu))
7265 return vmxHCHandleSplitLockAcXcpt(pVCpu, pVmxTransient);
7266
7267 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7268 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7269 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7270
7271 /* Re-inject it. We'll detect any nesting before getting here. */
7272 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7273 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7274 return VINF_SUCCESS;
7275}
7276
7277
7278/**
7279 * VM-exit exception handler for \#DB (Debug exception).
7280 *
7281 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7282 */
7283static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7284{
7285 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7286 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7287
7288 /*
7289 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7290 */
7291 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7292
7293 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7294 uint64_t const uDR6 = X86_DR6_INIT_VAL
7295 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7296 | X86_DR6_BD | X86_DR6_BS));
7297 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7298
7299 int rc;
7300 if (!pVmxTransient->fIsNestedGuest)
7301 {
7302 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7303
7304 /*
7305 * Prevents stepping twice over the same instruction when the guest is stepping using
7306 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7307 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7308 */
7309 if ( rc == VINF_EM_DBG_STEPPED
7310 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7311 {
7312 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7313 rc = VINF_EM_RAW_GUEST_TRAP;
7314 }
7315 }
7316 else
7317 rc = VINF_EM_RAW_GUEST_TRAP;
7318 Log6Func(("rc=%Rrc\n", rc));
7319 if (rc == VINF_EM_RAW_GUEST_TRAP)
7320 {
7321 /*
7322 * The exception was for the guest. Update DR6, DR7.GD and
7323 * IA32_DEBUGCTL.LBR before forwarding it.
7324 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7325 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7326 */
7327#ifndef IN_NEM_DARWIN
7328 VMMRZCallRing3Disable(pVCpu);
7329 HM_DISABLE_PREEMPT(pVCpu);
7330
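    /* Merge the DR6 status bits from the Exit qualification into the guest DR6 and, if the
       guest debug registers are currently loaded on the CPU, into the hardware DR6 as well. */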
7331 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7332 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7333 if (CPUMIsGuestDebugStateActive(pVCpu))
7334 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7335
7336 HM_RESTORE_PREEMPT();
7337 VMMRZCallRing3Enable(pVCpu);
7338#else
7339 /** @todo */
7340#endif
7341
7342 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7343 AssertRCReturn(rc, rc);
7344
7345 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7346 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7347
7348 /* Paranoia. */
7349 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7350 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7351
7352 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7353 AssertRC(rc);
7354
7355 /*
7356 * Raise #DB in the guest.
7357 *
7358 * It is important to reflect exactly what the VM-exit gave us (preserving the
7359 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7360 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7361 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7362 *
7363 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
7364 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7365 */
7366 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7367 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7368 return VINF_SUCCESS;
7369 }
7370
7371 /*
7372 * Not a guest trap, must be a hypervisor related debug event then.
7373 * Update DR6 in case someone is interested in it.
7374 */
7375 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7376 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7377 CPUMSetHyperDR6(pVCpu, uDR6);
7378
7379 return rc;
7380}
7381
7382
7383/**
7384 * Hacks its way around the lovely mesa driver's backdoor accesses.
7385 *
7386 * @sa hmR0SvmHandleMesaDrvGp.
7387 */
7388static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7389{
7390 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7391 RT_NOREF(pCtx);
7392
7393 /* For now we'll just skip the instruction. */
7394 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7395}
7396
7397
7398/**
7399 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7400 * backdoor logging w/o checking what it is running inside.
7401 *
7402 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7403 * backdoor port and magic numbers loaded in registers.
7404 *
7405 * @returns true if it is, false if it isn't.
7406 * @sa hmR0SvmIsMesaDrvGp.
7407 */
7408DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7409{
7410 /* 0xed: IN eAX,dx */
7411 uint8_t abInstr[1];
7412 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7413 return false;
7414
7415 /* Check that it is #GP(0). */
7416 if (pVmxTransient->uExitIntErrorCode != 0)
7417 return false;
7418
7419 /* Check magic and port. */
7420 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7421 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7422 if (pCtx->rax != UINT32_C(0x564d5868))
7423 return false;
7424 if (pCtx->dx != UINT32_C(0x5658))
7425 return false;
7426
7427 /* Flat ring-3 CS. */
7428 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7429 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7430 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7431 if (pCtx->cs.Attr.n.u2Dpl != 3)
7432 return false;
7433 if (pCtx->cs.u64Base != 0)
7434 return false;
7435
7436 /* Check opcode. */
7437 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7438 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7439 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7440 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7441 if (RT_FAILURE(rc))
7442 return false;
7443 if (abInstr[0] != 0xed)
7444 return false;
7445
7446 return true;
7447}
7448
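/*
 * For reference, a minimal sketch of the kind of guest ring-3 sequence recognized above
 * (illustrative only; the actual Mesa driver code may differ):
 *
 *      mov     eax, 0x564d5868     ; VMware backdoor magic ('VMXh')
 *      mov     dx,  0x5658         ; VMware backdoor I/O port
 *      in      eax, dx             ; opcode 0xed, faults with #GP(0) when ring-3 I/O isn't permitted
 */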
7449
7450/**
7451 * VM-exit exception handler for \#GP (General-protection exception).
7452 *
7453 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7454 */
7455static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7456{
7457 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7458 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7459
7460 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7461 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7462#ifndef IN_NEM_DARWIN
7463 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7464 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7465 { /* likely */ }
7466 else
7467#endif
7468 {
7469#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7470# ifndef IN_NEM_DARWIN
7471 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7472# else
7473 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7474# endif
7475#endif
7476 /*
7477 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7478 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7479 */
7480 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7481 AssertRCReturn(rc, rc);
7482 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7483 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7484
7485 if ( pVmxTransient->fIsNestedGuest
7486 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7487 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7488 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7489 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7490 else
7491 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7492 return rc;
7493 }
7494
7495#ifndef IN_NEM_DARWIN
7496 Assert(CPUMIsGuestInRealModeEx(pCtx));
7497 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7498 Assert(!pVmxTransient->fIsNestedGuest);
7499
7500 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7501 AssertRCReturn(rc, rc);
7502
7503 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7504 if (rcStrict == VINF_SUCCESS)
7505 {
7506 if (!CPUMIsGuestInRealModeEx(pCtx))
7507 {
7508 /*
7509 * The guest is no longer in real-mode, check if we can continue executing the
7510 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7511 */
7512 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7513 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7514 {
7515 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7516 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7517 }
7518 else
7519 {
7520 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7521 rcStrict = VINF_EM_RESCHEDULE;
7522 }
7523 }
7524 else
7525 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7526 }
7527 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7528 {
7529 rcStrict = VINF_SUCCESS;
7530 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7531 }
7532 return VBOXSTRICTRC_VAL(rcStrict);
7533#endif
7534}
7535
7536
7537/**
7538 * VM-exit exception handler for \#DE (Divide Error).
7539 *
7540 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7541 */
7542static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7543{
7544 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7545 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7546
7547 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7548 AssertRCReturn(rc, rc);
7549
7550 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7551 {
7552 rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
7553 Assert(rc == VINF_SUCCESS /* restart instr */ || rc == VERR_NOT_FOUND /* deliver exception */);
7554 }
7555 else
7556 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7557
7558 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7559 if (RT_FAILURE(rc))
7560 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7561 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7562 return VINF_SUCCESS;
7563}
7564
7565
7566/**
7567 * VM-exit exception handler wrapper for all other exceptions that are not handled
7568 * by a specific handler.
7569 *
7570 * This simply re-injects the exception back into the VM without any special
7571 * processing.
7572 *
7573 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7574 */
7575static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7576{
7577 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7578
7579#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7580# ifndef IN_NEM_DARWIN
7581 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7582 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7583 ("uVector=%#x u32XcptBitmap=%#X32\n",
7584 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7585 NOREF(pVmcsInfo);
7586# endif
7587#endif
7588
7589 /*
7590 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7591 * would have been handled while checking exits due to event delivery.
7592 */
7593 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7594
7595#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7596 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7597 AssertRCReturn(rc, rc);
7598 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7599#endif
7600
7601#ifdef VBOX_WITH_STATISTICS
7602 switch (uVector)
7603 {
7604 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7605 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7606 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7607 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7608 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7609 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7610 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7611 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7612 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7613 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7614 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7615 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7616 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7617 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7618 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7619 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7620 default:
7621 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7622 break;
7623 }
7624#endif
7625
7626 /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
7627 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7628 NOREF(uVector);
7629
7630 /* Re-inject the original exception into the guest. */
7631 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7632 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7633 return VINF_SUCCESS;
7634}
7635
7636
7637/**
7638 * VM-exit exception handler for all exceptions (except NMIs!).
7639 *
7640 * @remarks This may be called for both guests and nested-guests. Take care to not
7641 * make assumptions and avoid doing anything that is not relevant when
7642 * executing a nested-guest (e.g., Mesa driver hacks).
7643 */
7644static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7645{
7646 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7647
7648 /*
7649 * If this VM-exit occurred while delivering an event through the guest IDT, take
7650 * action based on the return code and additional hints (e.g. for page-faults)
7651 * that will be updated in the VMX transient structure.
7652 */
7653 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7654 if (rcStrict == VINF_SUCCESS)
7655 {
7656 /*
7657 * If an exception caused a VM-exit due to delivery of an event, the original
7658 * event may have to be re-injected into the guest. We shall reinject it and
7659 * continue guest execution. However, page-fault is a complicated case and
7660 * needs additional processing done in vmxHCExitXcptPF().
7661 */
7662 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7663 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7664 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7665 || uVector == X86_XCPT_PF)
7666 {
7667 switch (uVector)
7668 {
7669 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7670 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7671 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7672 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7673 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7674 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7675 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7676 default:
7677 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7678 }
7679 }
7680 /* else: inject pending event before resuming guest execution. */
7681 }
7682 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7683 {
7684 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7685 rcStrict = VINF_SUCCESS;
7686 }
7687
7688 return rcStrict;
7689}
7690/** @} */
7691
7692
7693/** @name VM-exit handlers.
7694 * @{
7695 */
7696/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7697/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7698/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7699
7700/**
7701 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7702 */
7703HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7704{
7705 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7706 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7707
7708#ifndef IN_NEM_DARWIN
7709 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7710 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7711 return VINF_SUCCESS;
7712 return VINF_EM_RAW_INTERRUPT;
7713#else
7714 return VINF_SUCCESS;
7715#endif
7716}
7717
7718
7719/**
7720 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7721 * VM-exit.
7722 */
7723HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7724{
7725 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7726 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7727
7728 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7729
7730 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7731 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7732 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7733
7734 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7735 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7736 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7737 NOREF(pVmcsInfo);
7738
7739 VBOXSTRICTRC rcStrict;
7740 switch (uExitIntType)
7741 {
7742#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7743 /*
7744 * Host physical NMIs:
7745 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7746 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7747 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7748 *
7749 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7750 * See Intel spec. 27.5.5 "Updating Non-Register State".
7751 */
7752 case VMX_EXIT_INT_INFO_TYPE_NMI:
7753 {
7754 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7755 break;
7756 }
7757#endif
7758
7759 /*
7760 * Privileged software exceptions (#DB from ICEBP),
7761 * Software exceptions (#BP and #OF),
7762 * Hardware exceptions:
7763 * Process the required exceptions and resume guest execution if possible.
7764 */
7765 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7766 Assert(uVector == X86_XCPT_DB);
7767 RT_FALL_THRU();
7768 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7769 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7770 RT_FALL_THRU();
7771 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7772 {
7773 NOREF(uVector);
7774 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7775 | HMVMX_READ_EXIT_INSTR_LEN
7776 | HMVMX_READ_IDT_VECTORING_INFO
7777 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7778 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7779 break;
7780 }
7781
7782 default:
7783 {
7784 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7785 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7786 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7787 break;
7788 }
7789 }
7790
7791 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7792 return rcStrict;
7793}
7794
7795
7796/**
7797 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7798 */
7799HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7800{
7801 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7802
7803 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7804 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7805 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7806
7807 /* Evaluate and deliver pending events and resume guest execution. */
7808 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7809 return VINF_SUCCESS;
7810}
7811
7812
7813/**
7814 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7815 */
7816HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7817{
7818 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7819
7820 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7821 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7822 {
7823 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7824 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7825 }
7826
7827 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7828
7829 /*
7830 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7831 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7832 */
7833 uint32_t fIntrState;
7834 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7835 AssertRC(rc);
7836 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7837 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7838 {
7839 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7840
7841 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7842 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7843 AssertRC(rc);
7844 }
7845
7846 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7847 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7848
7849 /* Evaluate and deliver pending events and resume guest execution. */
7850 return VINF_SUCCESS;
7851}
7852
7853
7854/**
7855 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7856 */
7857HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7858{
7859 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7860 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7861}
7862
7863
7864/**
7865 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7866 */
7867HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7868{
7869 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7870 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7871}
7872
7873
7874/**
7875 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7876 */
7877HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7878{
7879 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7880
7881 /*
7882 * Get the state we need and update the exit history entry.
7883 */
7884 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7885 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7886 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7887 AssertRCReturn(rc, rc);
7888
7889 VBOXSTRICTRC rcStrict;
7890 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7891 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7892 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7893 if (!pExitRec)
7894 {
7895 /*
7896 * Regular CPUID instruction execution.
7897 */
7898 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7899 if (rcStrict == VINF_SUCCESS)
7900 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7901 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7902 {
7903 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7904 rcStrict = VINF_SUCCESS;
7905 }
7906 }
7907 else
7908 {
7909 /*
7910 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7911 */
7912 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7913 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7914 AssertRCReturn(rc2, rc2);
7915
7916 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7917 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7918
7919 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7920 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7921
7922 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7923 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7924 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7925 }
7926 return rcStrict;
7927}
7928
7929
7930/**
7931 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7932 */
7933HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7934{
7935 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7936
7937 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7938 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7939 AssertRCReturn(rc, rc);
7940
7941 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7942 return VINF_EM_RAW_EMULATE_INSTR;
7943
7944 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7945 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7946}
7947
7948
7949/**
7950 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7951 */
7952HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7953{
7954 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7955
7956 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7957 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7958 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7959 AssertRCReturn(rc, rc);
7960
7961 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7962 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7963 {
7964 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7965 we must reset offsetting on VM-entry. See @bugref{6634}. */
7966 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7967 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7968 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7969 }
7970 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7971 {
7972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7973 rcStrict = VINF_SUCCESS;
7974 }
7975 return rcStrict;
7976}
7977
7978
7979/**
7980 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7981 */
7982HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7983{
7984 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7985
7986 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7987 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7988 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7989 AssertRCReturn(rc, rc);
7990
7991 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7992 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7993 {
7994 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7995 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7996 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7997 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7998 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7999 }
8000 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8001 {
8002 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8003 rcStrict = VINF_SUCCESS;
8004 }
8005 return rcStrict;
8006}
8007
8008
8009/**
8010 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
8011 */
8012HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8013{
8014 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8015
8016 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8017 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8018 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8019 AssertRCReturn(rc, rc);
8020
8021 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
8022 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8023 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8024 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8025 {
8026 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8027 rcStrict = VINF_SUCCESS;
8028 }
8029 return rcStrict;
8030}
8031
8032
8033/**
8034 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
8035 */
8036HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8037{
8038 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8039
8040 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
8041 if (EMAreHypercallInstructionsEnabled(pVCpu))
8042 {
8043 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8044 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8045 | CPUMCTX_EXTRN_RFLAGS
8046 | CPUMCTX_EXTRN_CR0
8047 | CPUMCTX_EXTRN_SS
8048 | CPUMCTX_EXTRN_CS
8049 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8050 AssertRCReturn(rc, rc);
8051
8052 /* Perform the hypercall. */
8053 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8054 if (rcStrict == VINF_SUCCESS)
8055 {
8056 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8057 AssertRCReturn(rc, rc);
8058 }
8059 else
8060 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8061 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8062 || RT_FAILURE(rcStrict));
8063
8064 /* If the hypercall changes anything other than the guest's general-purpose registers,
8065 we would need to reload the changed guest bits here before VM-entry. */
8066 }
8067 else
8068 Log4Func(("Hypercalls not enabled\n"));
8069
8070 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8071 if (RT_FAILURE(rcStrict))
8072 {
8073 vmxHCSetPendingXcptUD(pVCpu);
8074 rcStrict = VINF_SUCCESS;
8075 }
8076
8077 return rcStrict;
8078}
8079
8080
8081/**
8082 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8083 */
8084HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8085{
8086 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8087#ifndef IN_NEM_DARWIN
8088 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8089#endif
8090
8091 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8092 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8093 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8094 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8095 AssertRCReturn(rc, rc);
8096
8097 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8098
8099 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8100 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8101 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8102 {
8103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8104 rcStrict = VINF_SUCCESS;
8105 }
8106 else
8107 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8108 VBOXSTRICTRC_VAL(rcStrict)));
8109 return rcStrict;
8110}
8111
8112
8113/**
8114 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8115 */
8116HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8117{
8118 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8119
8120 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8121 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8122 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8123 AssertRCReturn(rc, rc);
8124
8125 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8126 if (rcStrict == VINF_SUCCESS)
8127 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8128 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8129 {
8130 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8131 rcStrict = VINF_SUCCESS;
8132 }
8133
8134 return rcStrict;
8135}
8136
8137
8138/**
8139 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8140 */
8141HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8142{
8143 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8144
8145 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8146 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8147 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8148 AssertRCReturn(rc, rc);
8149
8150 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8151 if (RT_SUCCESS(rcStrict))
8152 {
8153 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8154 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8155 rcStrict = VINF_SUCCESS;
8156 }
8157
8158 return rcStrict;
8159}
8160
8161
8162/**
8163 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8164 * VM-exit.
8165 */
8166HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8167{
8168 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8169 return VINF_EM_RESET;
8170}
8171
8172
8173/**
8174 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8175 */
8176HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8177{
8178 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8179
8180 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8181 AssertRCReturn(rc, rc);
8182
8183 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8184 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8185 rc = VINF_SUCCESS;
8186 else
8187 rc = VINF_EM_HALT;
8188
8189 if (rc != VINF_SUCCESS)
8190 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8191 return rc;
8192}
8193
8194
8195#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8196/**
8197 * VM-exit handler for instructions that result in a \#UD exception delivered to
8198 * the guest.
8199 */
8200HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8201{
8202 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8203 vmxHCSetPendingXcptUD(pVCpu);
8204 return VINF_SUCCESS;
8205}
8206#endif
8207
8208
8209/**
8210 * VM-exit handler for expiry of the VMX-preemption timer.
8211 */
8212HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8213{
8214 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8215
8216 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8217 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8218 Log12(("vmxHCExitPreemptTimer:\n"));
8219
8220 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8221 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8222 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8223 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8224 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8225}
8226
8227
8228/**
8229 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8230 */
8231HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8232{
8233 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8234
8235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8236 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8237 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8238 AssertRCReturn(rc, rc);
8239
8240 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8241 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8242 : HM_CHANGED_RAISED_XCPT_MASK);
8243
8244#ifndef IN_NEM_DARWIN
8245 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
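    /* XSETBV may have changed the guest XCR0, so re-evaluate whether XCR0 needs to be swapped
       around VM-entry/VM-exit: only when the guest has CR4.OSXSAVE set and its XCR0 differs
       from the host value. */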
8246 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8247 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8248 {
8249 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8250 hmR0VmxUpdateStartVmFunction(pVCpu);
8251 }
8252#endif
8253
8254 return rcStrict;
8255}
8256
8257
8258/**
8259 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8260 */
8261HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8262{
8263 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8264
8265 /** @todo Enable the new code after finding a reliable guest test-case. */
8266#if 1
8267 return VERR_EM_INTERPRETER;
8268#else
8269 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8270 | HMVMX_READ_EXIT_INSTR_INFO
8271 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8272 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8273 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8274 AssertRCReturn(rc, rc);
8275
8276 /* Paranoia. Ensure this has a memory operand. */
8277 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8278
8279 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8280 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8281 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8282 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8283
8284 RTGCPTR GCPtrDesc;
8285 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8286
8287 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8288 GCPtrDesc, uType);
8289 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8290 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8291 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8292 {
8293 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8294 rcStrict = VINF_SUCCESS;
8295 }
8296 return rcStrict;
8297#endif
8298}
8299
8300
8301/**
8302 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8303 * VM-exit.
8304 */
8305HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8306{
8307 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8308 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8309 AssertRCReturn(rc, rc);
8310
8311 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8312 if (RT_FAILURE(rc))
8313 return rc;
8314
8315 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8316 NOREF(uInvalidReason);
8317
8318#ifdef VBOX_STRICT
8319 uint32_t fIntrState;
8320 uint64_t u64Val;
8321 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8322 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8323 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8324
8325 Log4(("uInvalidReason %u\n", uInvalidReason));
8326 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8327 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8328 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8329
8330 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8331 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8332 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8333 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8334 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8335 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8336 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8337 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8338 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8339 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8340 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8341 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8342# ifndef IN_NEM_DARWIN
8343 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8344 {
8345 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8346 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8347 }
8348
8349 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8350# endif
8351#endif
8352
8353 return VERR_VMX_INVALID_GUEST_STATE;
8354}
8355
8356/**
8357 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8358 */
8359HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8360{
8361 /*
8362 * Cumulative notes of all recognized but unexpected VM-exits.
8363 *
8364 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8365 * nested-paging is used.
8366 *
8367 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8368 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8369 * this function (and thereby stopping VM execution) for handling such instructions.
8370 *
8371 *
8372 * VMX_EXIT_INIT_SIGNAL:
8373 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8374 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8375 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8376 *
8377 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8378 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8379 * See Intel spec. "23.8 Restrictions on VMX operation".
8380 *
8381 * VMX_EXIT_SIPI:
8382 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8383 * activity state is used. We don't make use of it as our guests don't have direct
8384 * access to the host local APIC.
8385 *
8386 * See Intel spec. 25.3 "Other Causes of VM-exits".
8387 *
8388 * VMX_EXIT_IO_SMI:
8389 * VMX_EXIT_SMI:
8390 * This can only happen if we support dual-monitor treatment of SMI, which can be
8391 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8392 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8393 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8394 *
8395 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8396 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8397 *
8398 * VMX_EXIT_ERR_MSR_LOAD:
8399 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8400 * and typically indicate a bug in the hypervisor code. We thus cannot resume
8401 * execution.
8402 *
8403 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8404 *
8405 * VMX_EXIT_ERR_MACHINE_CHECK:
8406 * Machine check exceptions indicate a fatal/unrecoverable hardware condition
8407 * including but not limited to system bus, ECC, parity, cache and TLB errors. A
8408 * #MC exception (abort class) is raised. We thus cannot assume a
8409 * reasonable chance of continuing any sort of execution and we bail.
8410 *
8411 * See Intel spec. 15.1 "Machine-check Architecture".
8412 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8413 *
8414 * VMX_EXIT_PML_FULL:
8415 * VMX_EXIT_VIRTUALIZED_EOI:
8416 * VMX_EXIT_APIC_WRITE:
8417 * We do not currently support any of these features and thus they are all unexpected
8418 * VM-exits.
8419 *
8420 * VMX_EXIT_GDTR_IDTR_ACCESS:
8421 * VMX_EXIT_LDTR_TR_ACCESS:
8422 * VMX_EXIT_RDRAND:
8423 * VMX_EXIT_RSM:
8424 * VMX_EXIT_VMFUNC:
8425 * VMX_EXIT_ENCLS:
8426 * VMX_EXIT_RDSEED:
8427 * VMX_EXIT_XSAVES:
8428 * VMX_EXIT_XRSTORS:
8429 * VMX_EXIT_UMWAIT:
8430 * VMX_EXIT_TPAUSE:
8431 * VMX_EXIT_LOADIWKEY:
8432 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8433 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8434 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8435 *
8436 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8437 */
8438 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8439 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8440 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8441}
8442
8443
8444/**
8445 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8446 */
8447HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8448{
8449 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8450
8451 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8452
8453 /** @todo Optimize this: We currently drag in the whole MSR state
8454 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8455 * MSRs required. That would require changes to IEM and possibly CPUM too.
8456 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8457 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8458 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8459 int rc;
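    /* The FS and GS base MSRs are not covered by CPUMCTX_EXTRN_ALL_MSRS, so for those we also
       import the corresponding segment register state (see the WRMSR handler below for the
       full explanation). */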
8460 switch (idMsr)
8461 {
8462 default:
8463 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8464 __FUNCTION__);
8465 AssertRCReturn(rc, rc);
8466 break;
8467 case MSR_K8_FS_BASE:
8468 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8469 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8470 AssertRCReturn(rc, rc);
8471 break;
8472 case MSR_K8_GS_BASE:
8473 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8474 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8475 AssertRCReturn(rc, rc);
8476 break;
8477 }
8478
8479 Log4Func(("ecx=%#RX32\n", idMsr));
8480
8481#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8482 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8483 {
8484 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8485 && idMsr != MSR_K6_EFER)
8486 {
8487 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8488 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8489 }
8490 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8491 {
8492 Assert(pVmcsInfo->pvMsrBitmap);
8493 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8494 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8495 {
8496 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8497 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8498 }
8499 }
8500 }
8501#endif
8502
8503 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8504 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8505 if (rcStrict == VINF_SUCCESS)
8506 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8507 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8508 {
8509 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8510 rcStrict = VINF_SUCCESS;
8511 }
8512 else
8513 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8514 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8515
8516 return rcStrict;
8517}
8518
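/*
 * Illustrative sketch (not part of the build): how a read-permission lookup in
 * the 4 KB VMX MSR bitmap works, which is conceptually what the VBOX_STRICT
 * checks above do via CPUMGetVmxMsrPermission().  The helper name and the
 * direct bitmap indexing are illustrative assumptions rather than VirtualBox
 * APIs; the region offsets follow the Intel SDM layout (read-low, read-high,
 * write-low, write-high regions of 1 KB each).
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stdbool.h>

static bool ExampleMsrReadCausesVmExit(uint8_t const *pbMsrBitmap /* 4 KB */, uint32_t idMsr)
{
    uint32_t offRegion;
    uint32_t iBit;
    if (idMsr <= UINT32_C(0x00001fff))                          /* "Low" MSRs: read bitmap at bytes 0..1023. */
    {
        offRegion = 0;
        iBit      = idMsr;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))  /* "High" MSRs: read bitmap at bytes 1024..2047. */
    {
        offRegion = 1024;
        iBit      = idMsr - UINT32_C(0xc0000000);
    }
    else
        return true;                                            /* MSRs outside both ranges always cause VM-exits. */
    return (pbMsrBitmap[offRegion + iBit / 8] >> (iBit & 7)) & 1; /* Set bit = intercept (VM-exit), clear = pass through. */
}
#endif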
8519
8520/**
8521 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8522 */
8523HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8524{
8525 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8526
8527 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8528
8529 /*
8530 * The FS and GS base MSRs are not part of the all-MSRs mask used below.
8531 * Although we don't need to fetch the base itself, as it will be overwritten shortly,
8532 * loading guest-state would also load the entire segment register, including the limit
8533 * and attributes, and thus we need to import them here.
8534 */
8535 /** @todo Optimize this: We currently drag in the whole MSR state
8536 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
8537 * MSRs required. That would require changes to IEM and possibly CPUM too.
8538 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp.) */
8539 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8540 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8541 int rc;
8542 switch (idMsr)
8543 {
8544 default:
8545 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8546 __FUNCTION__);
8547 AssertRCReturn(rc, rc);
8548 break;
8549
8550 case MSR_K8_FS_BASE:
8551 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8552 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8553 AssertRCReturn(rc, rc);
8554 break;
8555 case MSR_K8_GS_BASE:
8556 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8557 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8558 AssertRCReturn(rc, rc);
8559 break;
8560 }
8561 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8562
8563 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8564 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8565
8566 if (rcStrict == VINF_SUCCESS)
8567 {
8568 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8569
8570 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8571 if ( idMsr == MSR_IA32_APICBASE
8572 || ( idMsr >= MSR_IA32_X2APIC_START
8573 && idMsr <= MSR_IA32_X2APIC_END))
8574 {
8575 /*
8576 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8577 * When full APIC register virtualization is implemented we'll have to make
8578 * sure APIC state is saved from the VMCS before IEM changes it.
8579 */
8580 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8581 }
8582 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8583 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8584 else if (idMsr == MSR_K6_EFER)
8585 {
8586 /*
8587 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8588 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8589 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8590 */
8591 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8592 }
8593
8594 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8595 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8596 {
8597 switch (idMsr)
8598 {
8599 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8600 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8601 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8602 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8603 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8604 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8605 default:
8606 {
8607#ifndef IN_NEM_DARWIN
8608 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8609 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8610 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8611 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8612#else
8613 AssertMsgFailed(("TODO\n"));
8614#endif
8615 break;
8616 }
8617 }
8618 }
8619#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8620 else
8621 {
8622 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8623 switch (idMsr)
8624 {
8625 case MSR_IA32_SYSENTER_CS:
8626 case MSR_IA32_SYSENTER_EIP:
8627 case MSR_IA32_SYSENTER_ESP:
8628 case MSR_K8_FS_BASE:
8629 case MSR_K8_GS_BASE:
8630 {
8631 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8632 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8633 }
8634
8635 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8636 default:
8637 {
8638 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8639 {
8640 /* EFER MSR writes are always intercepted. */
8641 if (idMsr != MSR_K6_EFER)
8642 {
8643 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8644 idMsr));
8645 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8646 }
8647 }
8648
8649 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8650 {
8651 Assert(pVmcsInfo->pvMsrBitmap);
8652 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8653 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8654 {
8655 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8656 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8657 }
8658 }
8659 break;
8660 }
8661 }
8662 }
8663#endif /* VBOX_STRICT */
8664 }
8665 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8666 {
8667 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8668 rcStrict = VINF_SUCCESS;
8669 }
8670 else
8671 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8672 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8673
8674 return rcStrict;
8675}
8676
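/*
 * Illustrative sketch (not part of the build): the relationship between the
 * x2APIC MSR range checked in the handler above and the legacy xAPIC MMIO
 * register offsets.  Architecturally, x2APIC MSR 0x800 + N corresponds to
 * xAPIC register offset N * 16, so e.g. the TPR (xAPIC offset 0x80) is
 * MSR 0x808.  The helper names are illustrative assumptions, not VirtualBox
 * APIs.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stdbool.h>

static bool     ExampleIsX2ApicMsr(uint32_t idMsr)          { return idMsr >= 0x800 && idMsr <= 0x8ff; }
static uint32_t ExampleX2ApicMsrToXApicOff(uint32_t idMsr)  { return (idMsr - 0x800) << 4; }    /* 0x808 -> 0x80 (TPR). */
static uint32_t ExampleXApicOffToX2ApicMsr(uint32_t off)    { return 0x800 + (off >> 4); }      /* 0x80  -> 0x808.      */
#endif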
8677
8678/**
8679 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8680 */
8681HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8682{
8683 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8684
8685 /** @todo The guest has likely hit a contended spinlock. We might want to
8686 * poke or schedule a different guest VCPU. */
8687 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8688 if (RT_SUCCESS(rc))
8689 return VINF_EM_RAW_INTERRUPT;
8690
8691 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8692 return rc;
8693}
8694
8695
8696/**
8697 * VM-exit handler for when the TPR value is lowered below the specified
8698 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8699 */
8700HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8701{
8702 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8703 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8704
8705 /*
8706 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8707 * We'll re-evaluate pending interrupts and inject them before the next VM
8708 * entry so we can just continue execution here.
8709 */
8710 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8711 return VINF_SUCCESS;
8712}
8713
8714
8715/**
8716 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8717 * VM-exit.
8718 *
8719 * @retval VINF_SUCCESS when guest execution can continue.
8720 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8721 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8722 * incompatible guest state for VMX execution (real-on-v86 case).
8723 */
8724HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8725{
8726 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8727 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8728
8729 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8730 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8731 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8732
8733 VBOXSTRICTRC rcStrict;
8734 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8735 uint64_t const uExitQual = pVmxTransient->uExitQual;
8736 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8737 switch (uAccessType)
8738 {
8739 /*
8740 * MOV to CRx.
8741 */
8742 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8743 {
8744 /*
8745 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8746 * changes certain bits in CR0 or CR4 (and not just CR3). We are currently fine
8747 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8748 * PAE PDPTEs as well.
8749 */
8750 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8751 AssertRCReturn(rc, rc);
8752
8753 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8754#ifndef IN_NEM_DARWIN
8755 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8756#endif
8757 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8758 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8759
8760 /*
8761 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8762 * - When nested paging isn't used.
8763 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8764 * - We are executing in the VM debug loop.
8765 */
8766#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8767# ifndef IN_NEM_DARWIN
8768 Assert( iCrReg != 3
8769 || !VM_IS_VMX_NESTED_PAGING(pVM)
8770 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8771 || pVCpu->hmr0.s.fUsingDebugLoop);
8772# else
8773 Assert( iCrReg != 3
8774 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8775# endif
8776#endif
8777
8778 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8779 Assert( iCrReg != 8
8780 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8781
8782 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8783 AssertMsg( rcStrict == VINF_SUCCESS
8784 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8785
8786#ifndef IN_NEM_DARWIN
8787 /*
8788 * This is a kludge for handling switches back to real mode when we try to use
8789 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8790 * deal with special selector values, so we have to return to ring-3 and run
8791 * there till the selector values are V86 mode compatible.
8792 *
8793 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8794 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8795 * this function.
8796 */
8797 if ( iCrReg == 0
8798 && rcStrict == VINF_SUCCESS
8799 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8800 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8801 && (uOldCr0 & X86_CR0_PE)
8802 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8803 {
8804 /** @todo Check selectors rather than returning all the time. */
8805 Assert(!pVmxTransient->fIsNestedGuest);
8806 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8807 rcStrict = VINF_EM_RESCHEDULE_REM;
8808 }
8809#endif
8810
8811 break;
8812 }
8813
8814 /*
8815 * MOV from CRx.
8816 */
8817 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8818 {
8819 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8820 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8821
8822 /*
8823 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8824 * - When nested paging isn't used.
8825 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8826 * - We are executing in the VM debug loop.
8827 */
8828#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8829# ifndef IN_NEM_DARWIN
8830 Assert( iCrReg != 3
8831 || !VM_IS_VMX_NESTED_PAGING(pVM)
8832 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8833 || pVCpu->hmr0.s.fLeaveDone);
8834# else
8835 Assert( iCrReg != 3
8836 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8837# endif
8838#endif
8839
8840 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8841 Assert( iCrReg != 8
8842 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8843
8844 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8845 break;
8846 }
8847
8848 /*
8849 * CLTS (Clear Task-Switch Flag in CR0).
8850 */
8851 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8852 {
8853 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8854 break;
8855 }
8856
8857 /*
8858 * LMSW (Load Machine-Status Word into CR0).
8859 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8860 */
8861 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8862 {
8863 RTGCPTR GCPtrEffDst;
8864 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8865 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8866 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8867 if (fMemOperand)
8868 {
8869 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8870 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8871 }
8872 else
8873 GCPtrEffDst = NIL_RTGCPTR;
8874 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8875 break;
8876 }
8877
8878 default:
8879 {
8880 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8881 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8882 }
8883 }
8884
8885 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8886 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8887 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8888
8889 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8890 NOREF(pVM);
8891 return rcStrict;
8892}
8893
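/*
 * Illustrative sketch (not part of the build): the layout of the exit
 * qualification for control-register accesses that the VMX_EXIT_QUAL_CRX_*
 * macros in the handler above decode (Intel SDM, "Exit Qualification for
 * Control-Register Accesses").  The type and function names are illustrative
 * assumptions, not VirtualBox APIs.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

typedef struct EXAMPLECRXQUAL
{
    uint8_t  iCrReg;        /* Bits 3:0   - control register number (0, 3, 4 or 8).             */
    uint8_t  uAccessType;   /* Bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW.         */
    uint8_t  fLmswMemOp;    /* Bit  6     - LMSW operand type: 0=register, 1=memory.            */
    uint8_t  iGReg;         /* Bits 11:8  - general-purpose register (RAX..R15) for MOV CRx.    */
    uint16_t uLmswSrc;      /* Bits 31:16 - LMSW source data.                                   */
} EXAMPLECRXQUAL;

static EXAMPLECRXQUAL ExampleDecodeCrxQual(uint64_t uExitQual)
{
    EXAMPLECRXQUAL Decoded;
    Decoded.iCrReg      = (uint8_t)( uExitQual        & 0xf);
    Decoded.uAccessType = (uint8_t)((uExitQual >>  4) & 0x3);
    Decoded.fLmswMemOp  = (uint8_t)((uExitQual >>  6) & 0x1);
    Decoded.iGReg       = (uint8_t)((uExitQual >>  8) & 0xf);
    Decoded.uLmswSrc    = (uint16_t)((uExitQual >> 16) & 0xffff);
    return Decoded;
}
#endif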
8894
8895/**
8896 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8897 * VM-exit.
8898 */
8899HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8900{
8901 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8902 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8903
8904 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8905 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8906 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8907 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8908#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8909 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8910 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8911 AssertRCReturn(rc, rc);
8912
8913 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8914 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8915 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8916 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8917 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8918 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8919 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8920 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8921
8922 /*
8923 * Update exit history to see if this exit can be optimized.
8924 */
8925 VBOXSTRICTRC rcStrict;
8926 PCEMEXITREC pExitRec = NULL;
8927 if ( !fGstStepping
8928 && !fDbgStepping)
8929 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8930 !fIOString
8931 ? !fIOWrite
8932 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8933 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8934 : !fIOWrite
8935 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8936 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8937 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8938 if (!pExitRec)
8939 {
8940 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8941 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8942
8943 uint32_t const cbValue = s_aIOSizes[uIOSize];
8944 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8945 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8946 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8947 if (fIOString)
8948 {
8949 /*
8950 * INS/OUTS - I/O String instruction.
8951 *
8952 * Use instruction-information if available, otherwise fall back on
8953 * interpreting the instruction.
8954 */
8955 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8956 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8957 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8958 if (fInsOutsInfo)
8959 {
8960 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8961 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8962 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8963 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8964 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8965 if (fIOWrite)
8966 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8967 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8968 else
8969 {
8970 /*
8971 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8972 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8973 * See Intel Instruction spec. for "INS".
8974 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8975 */
8976 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8977 }
8978 }
8979 else
8980 rcStrict = IEMExecOne(pVCpu);
8981
8982 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8983 fUpdateRipAlready = true;
8984 }
8985 else
8986 {
8987 /*
8988 * IN/OUT - I/O instruction.
8989 */
8990 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8991 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8992 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8993 if (fIOWrite)
8994 {
8995 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8996 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8997#ifndef IN_NEM_DARWIN
8998 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8999 && !pCtx->eflags.Bits.u1TF)
9000 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
9001#endif
9002 }
9003 else
9004 {
9005 rcStrict = VERR_GCM_NOT_HANDLED;
9006 if (GCMIsInterceptingIOPortRead(pVCpu, uIOPort, cbValue))
9007 {
9008 rcStrict = GCMInterceptedIOPortRead(pVCpu, pCtx, uIOPort, cbValue);
9009 if (rcStrict == VINF_GCM_HANDLED_ADVANCE_RIP || rcStrict == VINF_GCM_HANDLED)
9010 {
9011 /* ASSUMES we don't need to update fCtxChanged when regular GPRs change here. */
9012 fUpdateRipAlready = rcStrict == VINF_GCM_HANDLED;
9013 if (rcStrict == VINF_GCM_HANDLED)
9014 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
9015 rcStrict = VINF_SUCCESS;
9016 }
9017 else
9018 Assert(rcStrict == VERR_GCM_NOT_HANDLED);
9019 }
9020
9021 if (rcStrict == VERR_GCM_NOT_HANDLED)
9022 {
9023 uint32_t u32Result = 0;
9024 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
9025 if (IOM_SUCCESS(rcStrict))
9026 {
9027 /* Save result of I/O IN instr. in AL/AX/EAX. */
9028 /** @todo r=bird: 32-bit op size should clear high bits of rax! */
9029 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
9030 }
9031#ifndef IN_NEM_DARWIN
9032 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9033 && !pCtx->eflags.Bits.u1TF)
9034 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
9035#endif
9036 }
9037 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
9038 }
9039 }
9040
9041 if (IOM_SUCCESS(rcStrict))
9042 {
9043 if (!fUpdateRipAlready)
9044 {
9045 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
9046 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
9047 }
9048
9049 /*
9050 * INS/OUTS with a REP prefix updates RFLAGS, which can be observed as a triple-fault
9051 * guru while booting a Fedora 17 64-bit guest.
9052 *
9053 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
9054 */
9055 if (fIOString)
9056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
9057
9058 /*
9059 * If any I/O breakpoints are armed, we need to check if one triggered
9060 * and take appropriate action.
9061 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9062 */
9063#if 1
9064 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9065#else
9066 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9067 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9068 AssertRCReturn(rc, rc);
9069#endif
9070
9071 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9072 * execution engines about whether hyper BPs and such are pending. */
9073 uint32_t const uDr7 = pCtx->dr[7];
9074 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9075 && X86_DR7_ANY_RW_IO(uDr7)
9076 && (pCtx->cr4 & X86_CR4_DE))
9077 || DBGFBpIsHwIoArmed(pVM)))
9078 {
9079 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9080
9081#ifndef IN_NEM_DARWIN
9082 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9083 VMMRZCallRing3Disable(pVCpu);
9084 HM_DISABLE_PREEMPT(pVCpu);
9085
9086 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9087
9088 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9089 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9090 {
9091 /* Raise #DB. */
9092 if (fIsGuestDbgActive)
9093 ASMSetDR6(pCtx->dr[6]);
9094 if (pCtx->dr[7] != uDr7)
9095 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9096
9097 vmxHCSetPendingXcptDB(pVCpu);
9098 }
9099 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
9100 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9101 else if ( rcStrict2 != VINF_SUCCESS
9102 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9103 rcStrict = rcStrict2;
9104 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9105
9106 HM_RESTORE_PREEMPT();
9107 VMMRZCallRing3Enable(pVCpu);
9108#else
9109 /** @todo */
9110#endif
9111 }
9112 }
9113
9114#ifdef VBOX_STRICT
9115 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9116 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9117 Assert(!fIOWrite);
9118 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9119 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9120 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9121 Assert(fIOWrite);
9122 else
9123 {
9124# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9125 * statuses, that the VMM device and some others may return. See
9126 * IOM_SUCCESS() for guidance. */
9127 AssertMsg( RT_FAILURE(rcStrict)
9128 || rcStrict == VINF_SUCCESS
9129 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9130 || rcStrict == VINF_EM_DBG_BREAKPOINT
9131 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9132 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9133# endif
9134 }
9135#endif
9136 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9137 }
9138 else
9139 {
9140 /*
9141 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9142 */
9143 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9144 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9145 AssertRCReturn(rc2, rc2);
9146 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9147 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9148 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9149 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9150 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9151 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9152
9153 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9154 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9155
9156 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9157 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9158 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9159 }
9160 return rcStrict;
9161}
9162
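/*
 * Illustrative sketch (not part of the build): merging an I/O IN result into
 * AL/AX/EAX over the full 64-bit RAX, including the architectural rule that
 * the @todo in the handler above refers to: a 32-bit GPR write zero-extends
 * into the upper half of the 64-bit register, while 8-bit and 16-bit writes
 * leave the remaining bits untouched.  The helper name is an illustrative
 * assumption, not a VirtualBox API.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

static uint64_t ExampleMergeIoInResult(uint64_t uRax, uint32_t u32Result, uint32_t cbValue)
{
    switch (cbValue)
    {
        case 1:  return (uRax & ~UINT64_C(0xff))   | (u32Result & UINT32_C(0xff));    /* AL:  bits 63:8 preserved.  */
        case 2:  return (uRax & ~UINT64_C(0xffff)) | (u32Result & UINT32_C(0xffff));  /* AX:  bits 63:16 preserved. */
        case 4:  return u32Result;                                                    /* EAX: bits 63:32 zeroed.    */
        default: return uRax;                                                         /* Invalid size, unchanged.   */
    }
}
#endif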
9163
9164/**
9165 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9166 * VM-exit.
9167 */
9168HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9169{
9170 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9171
9172 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9173 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9174 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9175 {
9176 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9177 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9178 {
9179 uint32_t uErrCode;
9180 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9181 {
9182 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9183 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9184 }
9185 else
9186 uErrCode = 0;
9187
9188 RTGCUINTPTR GCPtrFaultAddress;
9189 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9190 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9191 else
9192 GCPtrFaultAddress = 0;
9193
9194 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9195
9196 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9197 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9198
9199 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9200 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9201 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9202 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9203 }
9204 }
9205
9206 /* Fall back to the interpreter to emulate the task-switch. */
9207 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9208 return VERR_EM_INTERPRETER;
9209}
9210
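/*
 * Illustrative sketch (not part of the build): the layout of the IDT-vectoring
 * information field consulted by the task-switch handler above (Intel SDM,
 * VM-exit information fields).  Type 3 (hardware exception) with vector 14 is
 * how a #PF that was being delivered when the VM-exit occurred shows up, which
 * is why the handler fetches CR2 for that case.  Names are illustrative
 * assumptions, not VirtualBox APIs.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stdbool.h>

static bool    ExampleIdtVectoringIsValid(uint32_t uInfo)      { return (uInfo >> 31) & 1; }             /* Bit 31.    */
static uint8_t ExampleIdtVectoringVector(uint32_t uInfo)       { return (uint8_t)(uInfo & 0xff); }       /* Bits 7:0.  */
static uint8_t ExampleIdtVectoringType(uint32_t uInfo)         { return (uint8_t)((uInfo >> 8) & 0x7); } /* Bits 10:8. */
static bool    ExampleIdtVectoringHasErrorCode(uint32_t uInfo) { return (uInfo >> 11) & 1; }             /* Bit 11.    */

static bool    ExampleIdtVectoringIsXcptPF(uint32_t uInfo)
{
    return ExampleIdtVectoringIsValid(uInfo)
        && ExampleIdtVectoringType(uInfo)   == 3   /* Hardware exception. */
        && ExampleIdtVectoringVector(uInfo) == 14; /* #PF.                */
}
#endif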
9211
9212/**
9213 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9214 */
9215HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9216{
9217 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9218
9219 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9220 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9221 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9222 AssertRC(rc);
9223 return VINF_EM_DBG_STEPPED;
9224}
9225
9226
9227/**
9228 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9229 */
9230HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9231{
9232 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9233 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9234
9235 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9236 | HMVMX_READ_EXIT_INSTR_LEN
9237 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9238 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9239 | HMVMX_READ_IDT_VECTORING_INFO
9240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9241
9242 /*
9243 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9244 */
9245 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9246 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9247 {
9248 /* For some crazy guests, if event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9249 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9250 {
9251 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9252 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9253 }
9254 }
9255 else
9256 {
9257 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9258 return rcStrict;
9259 }
9260
9261 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9262 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9263 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9264 AssertRCReturn(rc, rc);
9265
9266 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9267 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9268 switch (uAccessType)
9269 {
9270#ifndef IN_NEM_DARWIN
9271 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9272 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9273 {
9274 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9275 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9276 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9277
9278 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9279 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9280 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9281 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9282 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9283
9284 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9285 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9286 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9287 if ( rcStrict == VINF_SUCCESS
9288 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9289 || rcStrict == VERR_PAGE_NOT_PRESENT)
9290 {
9291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9292 | HM_CHANGED_GUEST_APIC_TPR);
9293 rcStrict = VINF_SUCCESS;
9294 }
9295 break;
9296 }
9297#else
9298 /** @todo */
9299#endif
9300
9301 default:
9302 {
9303 Log4Func(("uAccessType=%#x\n", uAccessType));
9304 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9305 break;
9306 }
9307 }
9308
9309 if (rcStrict != VINF_SUCCESS)
9310 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9311 return rcStrict;
9312}
9313
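/*
 * Illustrative sketch (not part of the build): how the faulting guest-physical
 * address is derived in the handler above from the guest's IA32_APIC_BASE MSR
 * value and the page offset reported in the exit qualification.  The low
 * 12 bits of the MSR are flags (bit 8 = BSP, bit 10 = x2APIC enable,
 * bit 11 = xAPIC global enable), not address bits.  The helper name is an
 * illustrative assumption, not a VirtualBox API.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

static uint64_t ExampleApicAccessGCPhys(uint64_t uApicBaseMsr, uint32_t offAccess)
{
    uint64_t const GCPhysApicBase = uApicBaseMsr & ~UINT64_C(0xfff);    /* Keep only the page-aligned base address.      */
    return GCPhysApicBase + (offAccess & UINT32_C(0xfff));              /* Add the offset of the access within the page. */
}
#endif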
9314
9315/**
9316 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9317 * VM-exit.
9318 */
9319HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9320{
9321 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9322 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9323
9324 /*
9325 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9326 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9327 * must emulate the MOV DRx access.
9328 */
9329 if (!pVmxTransient->fIsNestedGuest)
9330 {
9331 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9332 if ( pVmxTransient->fWasGuestDebugStateActive
9333#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9334 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9335#endif
9336 )
9337 {
9338 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9339 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9340 }
9341
9342 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9343 && !pVmxTransient->fWasHyperDebugStateActive)
9344 {
9345 Assert(!DBGFIsStepping(pVCpu));
9346 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9347
9348 /* Whether we disable intercepting MOV DRx instructions and resume
9349 the current one, or emulate it and keep intercepting them, is
9350 configurable, though it usually comes down to whether there are
9351 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9352#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9353 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9354#else
9355 bool const fResumeInstruction = true;
9356#endif
9357 if (fResumeInstruction)
9358 {
9359 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9360 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9361 AssertRC(rc);
9362 }
9363
9364#ifndef IN_NEM_DARWIN
9365 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9366 VMMRZCallRing3Disable(pVCpu);
9367 HM_DISABLE_PREEMPT(pVCpu);
9368
9369 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9370 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9371 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9372
9373 HM_RESTORE_PREEMPT();
9374 VMMRZCallRing3Enable(pVCpu);
9375#else
9376 CPUMR3NemActivateGuestDebugState(pVCpu);
9377 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9378 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9379#endif
9380
9381 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9382 if (fResumeInstruction)
9383 {
9384#ifdef VBOX_WITH_STATISTICS
9385 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9386 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9387 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9388 else
9389 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9390#endif
9391 return VINF_SUCCESS;
9392 }
9393 }
9394 }
9395
9396 /*
9397 * Import state. We must have DR7 loaded here as it's always consulted,
9398 * both for reading and writing. The other debug registers are never
9399 * exported as such.
9400 */
9401 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9402 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9403 | CPUMCTX_EXTRN_GPRS_MASK
9404 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9405 AssertRCReturn(rc, rc);
9406
9407 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9408 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9409 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9410 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9411
9412 VBOXSTRICTRC rcStrict;
9413 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9414 {
9415 /*
9416 * Write DRx register.
9417 */
9418 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9419 AssertMsg( rcStrict == VINF_SUCCESS
9420 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9421
9422 if (rcStrict == VINF_SUCCESS)
9423 {
9424 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9425 * kept it for now to avoid breaking something non-obvious. */
9426 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9427 | HM_CHANGED_GUEST_DR7);
9428 /* Update the DR6 register if guest debug state is active, otherwise we'll
9429 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9430 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9431 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9432 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9433 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9434 }
9435 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9436 {
9437 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9438 rcStrict = VINF_SUCCESS;
9439 }
9440
9441 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9442 }
9443 else
9444 {
9445 /*
9446 * Read DRx register into a general purpose register.
9447 */
9448 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9449 AssertMsg( rcStrict == VINF_SUCCESS
9450 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9451
9452 if (rcStrict == VINF_SUCCESS)
9453 {
9454 if (iGReg == X86_GREG_xSP)
9455 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9456 | HM_CHANGED_GUEST_RSP);
9457 else
9458 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9459 }
9460 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9461 {
9462 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9463 rcStrict = VINF_SUCCESS;
9464 }
9465
9466 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9467 }
9468
9469 return rcStrict;
9470}
9471
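/*
 * Illustrative sketch (not part of the build): the layout of the exit
 * qualification for MOV DRx accesses decoded by the VMX_EXIT_QUAL_DRX_*
 * macros in the handler above (Intel SDM, "Exit Qualification for MOV DR").
 * Names are illustrative assumptions, not VirtualBox APIs.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stdbool.h>

static uint8_t ExampleDrxQualDebugReg(uint64_t uExitQual) { return (uint8_t)(uExitQual & 0x7); }         /* Bits 2:0.                                    */
static bool    ExampleDrxQualIsRead(uint64_t uExitQual)   { return (uExitQual >> 4) & 1; }               /* Bit 4: 0=MOV to DRx (write), 1=MOV from DRx. */
static uint8_t ExampleDrxQualGenReg(uint64_t uExitQual)   { return (uint8_t)((uExitQual >> 8) & 0xf); }  /* Bits 11:8: RAX..R15.                         */
#endif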
9472
9473/**
9474 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9475 * Conditional VM-exit.
9476 */
9477HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9478{
9479 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9480
9481#ifndef IN_NEM_DARWIN
9482 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9483
9484 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9485 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9486 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9487 | HMVMX_READ_IDT_VECTORING_INFO
9488 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9489 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9490
9491 /*
9492 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9493 */
9494 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9495 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9496 {
9497 /*
9498 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9499 * instruction emulation to inject the original event. Otherwise, injecting the original event
9500 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9501 */
9502 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9503 { /* likely */ }
9504 else
9505 {
9506 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9507# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9508 /** @todo NSTVMX: Think about how this should be handled. */
9509 if (pVmxTransient->fIsNestedGuest)
9510 return VERR_VMX_IPE_3;
9511# endif
9512 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9513 }
9514 }
9515 else
9516 {
9517 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9518 return rcStrict;
9519 }
9520
9521 /*
9522 * Get sufficient state and update the exit history entry.
9523 */
9524 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9525 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9526 AssertRCReturn(rc, rc);
9527
9528 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9529 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9530 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9531 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9532 if (!pExitRec)
9533 {
9534 /*
9535 * If we succeed, resume guest execution.
9536 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9537 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9538 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9539 * weird case. See @bugref{6043}.
9540 */
9541 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9542/** @todo bird: We can probably just go straight to IOM here and assume that
9543 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9544 * well. However, we need to address the aliasing workarounds that
9545 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9546 *
9547 * Might also be interesting to see if we can get this done more or
9548 * less locklessly inside IOM. Need to consider the lookup table
9549 * updating and its use a bit more carefully first (or do all updates via
9550 * rendezvous) */
9551 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9552 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9553 if ( rcStrict == VINF_SUCCESS
9554 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9555 || rcStrict == VERR_PAGE_NOT_PRESENT)
9556 {
9557 /* Successfully handled MMIO operation. */
9558 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9559 | HM_CHANGED_GUEST_APIC_TPR);
9560 rcStrict = VINF_SUCCESS;
9561 }
9562 }
9563 else
9564 {
9565 /*
9566 * Frequent exit or something needing probing. Call EMHistoryExec.
9567 */
9568 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL, IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9569 AssertRCReturn(rc2, rc2);
9570 Log4(("EptMisconfigExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9571 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9572
9573 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9574 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9575
9576 Log4(("EptMisconfigExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9577 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9578 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9579 }
9580 return rcStrict;
9581#else
9582 AssertFailed();
9583 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9584#endif
9585}
9586
9587
9588/**
9589 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9590 * VM-exit.
9591 */
9592HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9593{
9594 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9595#ifndef IN_NEM_DARWIN
9596 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9597
9598 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9599 | HMVMX_READ_EXIT_INSTR_LEN
9600 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9601 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9602 | HMVMX_READ_IDT_VECTORING_INFO
9603 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9604 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9605
9606 /*
9607 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9608 */
9609 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9610 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9611 {
9612 /*
9613 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9614 * we shall resolve the nested #PF and re-inject the original event.
9615 */
9616 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9617 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9618 }
9619 else
9620 {
9621 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9622 return rcStrict;
9623 }
9624
9625 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9626 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9627 AssertRCReturn(rc, rc);
9628
9629 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9630 uint64_t const uExitQual = pVmxTransient->uExitQual;
9631 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9632
9633 RTGCUINT uErrorCode = 0;
9634 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9635 uErrorCode |= X86_TRAP_PF_ID;
9636 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9637 uErrorCode |= X86_TRAP_PF_RW;
9638 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9639 uErrorCode |= X86_TRAP_PF_P;
9640
9641 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9642 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9643
9644 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9645
9646 /*
9647 * Handle the pagefault trap for the nested shadow table.
9648 */
9649 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9650 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9651 TRPMResetTrap(pVCpu);
9652
9653 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9654 if ( rcStrict == VINF_SUCCESS
9655 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9656 || rcStrict == VERR_PAGE_NOT_PRESENT)
9657 {
9658 /* Successfully synced our nested page tables. */
9659 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9660 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9661 return VINF_SUCCESS;
9662 }
9663 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9664 return rcStrict;
9665
9666#else /* IN_NEM_DARWIN */
9667 PVM pVM = pVCpu->CTX_SUFF(pVM);
9668 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9669 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9670 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9671 vmxHCImportGuestRip(pVCpu);
9672 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9673
9674 /*
9675 * Ask PGM for information about the given GCPhys. We need to check if we're
9676 * out of sync first.
9677 */
9678 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9679 false,
9680 false };
9681 PGMPHYSNEMPAGEINFO Info;
9682 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9683 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9684 if (RT_SUCCESS(rc))
9685 {
9686 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9687 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9688 {
9689 if (State.fCanResume)
9690 {
9691 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9692 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9693 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9694 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9695 State.fDidSomething ? "" : " no-change"));
9696 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9697 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9698 return VINF_SUCCESS;
9699 }
9700 }
9701
9702 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9703 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9704 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9705 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9706 State.fDidSomething ? "" : " no-change"));
9707 }
9708 else
9709 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9710 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9711 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9712
9713 /*
9714 * Emulate the memory access, either access handler or special memory.
9715 */
9716 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9717 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9718 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9719 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9720 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9721
9722 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9723 AssertRCReturn(rc, rc);
9724
9725 VBOXSTRICTRC rcStrict;
9726 if (!pExitRec)
9727 rcStrict = IEMExecOne(pVCpu);
9728 else
9729 {
9730 /* Frequent access or probing. */
9731 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9732 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9733 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9734 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9735 }
9736
9737 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9738
9739 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9740 return rcStrict;
9741#endif /* IN_NEM_DARWIN */
9742}
9743
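/*
 * Illustrative sketch (not part of the build): how the EPT-violation exit
 * qualification bits are folded into an x86 #PF error code, as the handler
 * above does before handing the fault to PGM.  The bit positions follow the
 * Intel SDM ("Exit Qualification for EPT Violations") and the architectural
 * #PF error-code layout; the helper name is an illustrative assumption, not a
 * VirtualBox API.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

static uint32_t ExampleEptQualToPfErrCode(uint64_t uExitQual)
{
    uint32_t uErrCode = 0;
    if (uExitQual & (UINT64_C(1) << 2))     /* Bit 2: access was an instruction fetch.                     */
        uErrCode |= UINT32_C(0x10);         /* X86_TRAP_PF_ID                                              */
    if (uExitQual & (UINT64_C(1) << 1))     /* Bit 1: access was a data write.                             */
        uErrCode |= UINT32_C(0x02);         /* X86_TRAP_PF_RW                                              */
    if (uExitQual & (UINT64_C(7) << 3))     /* Bits 5:3: EPT entry allowed read/write/execute -> present.  */
        uErrCode |= UINT32_C(0x01);         /* X86_TRAP_PF_P                                               */
    return uErrCode;
}
#endif
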
9744#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9745
9746/**
9747 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9748 */
9749HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9750{
9751 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9752
9753 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9754 | HMVMX_READ_EXIT_INSTR_INFO
9755 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9756 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9757 | CPUMCTX_EXTRN_SREG_MASK
9758 | CPUMCTX_EXTRN_HWVIRT
9759 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9760 AssertRCReturn(rc, rc);
9761
9762 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9763
9764 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9765 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9766
9767 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9768 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9769 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9770 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9771 {
9772 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9773 rcStrict = VINF_SUCCESS;
9774 }
9775 return rcStrict;
9776}
9777
9778
9779/**
9780 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9781 */
9782HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9783{
9784 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9785
9786 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9787 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9788 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9789 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9790 AssertRCReturn(rc, rc);
9791
9792 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9793
9794 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9795 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9796 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9797 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9798 {
9799 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9800 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9801 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9802 }
9803 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9804 return rcStrict;
9805}
9806
9807
9808/**
9809 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9810 */
9811HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9812{
9813 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9814
9815 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9816 | HMVMX_READ_EXIT_INSTR_INFO
9817 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9818 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9819 | CPUMCTX_EXTRN_SREG_MASK
9820 | CPUMCTX_EXTRN_HWVIRT
9821 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9822 AssertRCReturn(rc, rc);
9823
9824 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9825
9826 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9827 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9828
9829 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9830 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9831 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9832 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9833 {
9834 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9835 rcStrict = VINF_SUCCESS;
9836 }
9837 return rcStrict;
9838}
9839
9840
9841/**
9842 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9843 */
9844HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9845{
9846 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9847
9848 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9849 | HMVMX_READ_EXIT_INSTR_INFO
9850 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9851 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9852 | CPUMCTX_EXTRN_SREG_MASK
9853 | CPUMCTX_EXTRN_HWVIRT
9854 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9855 AssertRCReturn(rc, rc);
9856
9857 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9858
9859 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9860 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9861
9862 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9863 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9865 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9866 {
9867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9868 rcStrict = VINF_SUCCESS;
9869 }
9870 return rcStrict;
9871}
9872
9873
9874/**
9875 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9876 */
9877HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9878{
9879 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9880
9881 /*
9882 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9883 * thus might not need to import the shadow VMCS state, but it is safer to do so in
9884 * case code elsewhere dares look at unsynced VMCS fields.
9885 */
9886 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9887 | HMVMX_READ_EXIT_INSTR_INFO
9888 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9889 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9890 | CPUMCTX_EXTRN_SREG_MASK
9891 | CPUMCTX_EXTRN_HWVIRT
9892 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9893 AssertRCReturn(rc, rc);
9894
9895 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9896
9897 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9898 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9899 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9900
9901 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9902 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9903 {
9904 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9905
9906# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9907 /* Try for exit optimization. This is done on the following instruction
9908 because it would be a waste of time to have to reinterpret the
9909 already decoded vmread instruction. */
9910 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9911 if (pExitRec)
9912 {
9913 /* Frequent access or probing. */
9914 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9915 AssertRCReturn(rc, rc);
9916
9917 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9918 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9919 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9920 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9921 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9922 }
9923# endif
9924 }
9925 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9926 {
9927 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9928 rcStrict = VINF_SUCCESS;
9929 }
9930 return rcStrict;
9931}
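/*
 * Note on the memory-operand access direction used above: for VMREAD the memory operand
 * is the destination and is therefore decoded with VMXMEMACCESS_WRITE, while for VMWRITE
 * (further below) the memory operand is the source and is decoded with VMXMEMACCESS_READ.
 * Register operands (InstrInfo.VmreadVmwrite.fIsRegOperand) skip the memory-operand
 * decoding entirely.
 */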
9932
9933
9934/**
9935 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9936 */
9937HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9938{
9939 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9940
9941 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9942 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9943 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9944 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9945 AssertRCReturn(rc, rc);
9946
9947 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9948
9949 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9950 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9951 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9952 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9953 {
9954 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9955 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9956 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9957 }
9958 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9959 return rcStrict;
9960}
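/*
 * Note: on successful VMLAUNCH/VMRESUME emulation the handlers above flag
 * HM_CHANGED_ALL_GUEST since IEM has switched the vCPU to the nested-guest context, and
 * they return VINF_VMX_VMLAUNCH_VMRESUME once the vCPU is in VMX non-root mode,
 * presumably so that the outer execution loop can switch over to running the
 * nested-guest.
 */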
9961
9962
9963/**
9964 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9965 */
9966HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9967{
9968 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9969
9970 /*
9971 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9972 * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9973 * re-loading of the entire shadow VMCS, so we save the entire shadow VMCS state here.
9974 */
9975 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9976 | HMVMX_READ_EXIT_INSTR_INFO
9977 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9978 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9979 | CPUMCTX_EXTRN_SREG_MASK
9980 | CPUMCTX_EXTRN_HWVIRT
9981 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9982 AssertRCReturn(rc, rc);
9983
9984 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9985
9986 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9987 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9988 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9989
9990 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9991 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9992 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9993 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9994 {
9995 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9996 rcStrict = VINF_SUCCESS;
9997 }
9998 return rcStrict;
9999}
10000
10001
10002/**
10003 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
10004 */
10005HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10006{
10007 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10008
10009 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10010 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
10011 | CPUMCTX_EXTRN_HWVIRT
10012 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10013 AssertRCReturn(rc, rc);
10014
10015 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10016
10017 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
10018 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10019 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
10020 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10021 {
10022 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10023 rcStrict = VINF_SUCCESS;
10024 }
10025 return rcStrict;
10026}
10027
10028
10029/**
10030 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
10031 */
10032HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10033{
10034 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10035
10036 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10037 | HMVMX_READ_EXIT_INSTR_INFO
10038 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10039 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10040 | CPUMCTX_EXTRN_SREG_MASK
10041 | CPUMCTX_EXTRN_HWVIRT
10042 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10043 AssertRCReturn(rc, rc);
10044
10045 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10046
10047 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10048 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10049
10050 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
10051 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10052 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
10053 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10054 {
10055 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10056 rcStrict = VINF_SUCCESS;
10057 }
10058 return rcStrict;
10059}
10060
10061
10062/**
10063 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10064 */
10065HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10066{
10067 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10068
10069 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10070 | HMVMX_READ_EXIT_INSTR_INFO
10071 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10072 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10073 | CPUMCTX_EXTRN_SREG_MASK
10074 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10075 AssertRCReturn(rc, rc);
10076
10077 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10078
10079 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10080 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10081
10082 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10083 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10085 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10086 {
10087 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10088 rcStrict = VINF_SUCCESS;
10089 }
10090 return rcStrict;
10091}
10092
10093
10094# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10095/**
10096 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10097 */
10098HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10099{
10100 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10101
10102 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10103 | HMVMX_READ_EXIT_INSTR_INFO
10104 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10105 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10106 | CPUMCTX_EXTRN_SREG_MASK
10107 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10108 AssertRCReturn(rc, rc);
10109
10110 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10111
10112 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10113 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10114
10115 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10116 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10117 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10118 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10119 {
10120 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10121 rcStrict = VINF_SUCCESS;
10122 }
10123 return rcStrict;
10124}
10125# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10126#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10127/** @} */
10128
10129
10130#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10131/** @name Nested-guest VM-exit handlers.
10132 * @{
10133 */
10134/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10135/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10136/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10137
10138/**
10139 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10140 * Conditional VM-exit.
10141 */
10142HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10143{
10144 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10145
10146 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10147
10148 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10149 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10150 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10151
10152 switch (uExitIntType)
10153 {
10154# ifndef IN_NEM_DARWIN
10155 /*
10156 * Physical NMIs:
10157 * We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
10158 */
10159 case VMX_EXIT_INT_INFO_TYPE_NMI:
10160 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10161# endif
10162
10163 /*
10164 * Hardware exceptions,
10165 * Software exceptions,
10166 * Privileged software exceptions:
10167 * Figure out if the exception must be delivered to the guest or the nested-guest.
10168 */
10169 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10170 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10171 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10172 {
10173 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10174 | HMVMX_READ_EXIT_INSTR_LEN
10175 | HMVMX_READ_IDT_VECTORING_INFO
10176 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10177
10178 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10179 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
10180 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, uVector, pVmxTransient->uExitIntErrorCode))
10181 {
10182 /*
10183 * Split-lock triggered #ACs should not be injected into the nested-guest
10184 * since we don't support split-lock detection for nested-guests yet.
10185 */
10186 if ( uVector == X86_XCPT_AC
10187 && uExitIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
10188 {
10189 int const rc = vmxHCImportGuestState<HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10190 AssertRCReturn(rc, rc);
10191 if (vmxHCIsSplitLockAcXcpt(pVCpu))
10192 {
10193 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10194 if ( rcStrict == VINF_SUCCESS
10195 && !VCPU_2_VMXSTATE(pVCpu).Event.fPending)
10196 return vmxHCHandleSplitLockAcXcpt(pVCpu, pVmxTransient);
10197 if (rcStrict == VINF_HM_DOUBLE_FAULT)
10198 {
10199 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
10200 rcStrict = VINF_SUCCESS;
10201 }
10202 return rcStrict;
10203 }
10204 }
10205
10206 /* Exit qualification is required for debug and page-fault exceptions. */
10207 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10208
10209 /*
10210 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10211 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10212 * length. However, if delivery of a software interrupt, software exception or privileged
10213 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10214 */
10215 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10216 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10217 pVmxTransient->uExitIntErrorCode,
10218 pVmxTransient->uIdtVectoringInfo,
10219 pVmxTransient->uIdtVectoringErrorCode);
10220#ifdef DEBUG_ramshankar
10221 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10222 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10223 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10224 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10225 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10226 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10227#endif
10228 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10229 }
10230
10231 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10232 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10233 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10234 }
10235
10236 /*
10237 * Software interrupts:
10238 * VM-exits cannot be caused by software interrupts.
10239 *
10240 * External interrupts:
10241 * This should only happen when "acknowledge external interrupts on VM-exit"
10242 * control is set. However, we never set this when executing a guest or
10243 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10244 * the guest.
10245 */
10246 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10247 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10248 default:
10249 {
10250 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10251 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10252 }
10253 }
10254}
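/*
 * Rough summary of the exception/NMI routing implemented above (descriptive only):
 *   - Host physical NMIs are never reflected to the nested-guest; outside the NEM darwin
 *     backend they are handed straight to the host via hmR0VmxExitHostNmi.
 *   - Hardware, software and privileged software exceptions that the nested-guest
 *     intercepts (CPUMIsGuestVmxXcptInterceptSet) are reflected as nested-guest VM-exits
 *     via IEMExecVmxVmexitXcpt, with split-lock #ACs special-cased because split-lock
 *     detection isn't supported for nested-guests yet.
 *   - Exceptions the nested-guest does not intercept are handled as ordinary guest
 *     exceptions by vmxHCExitXcpt.
 *   - Software and external interrupts are unexpected here and yield
 *     VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE.
 */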
10255
10256
10257/**
10258 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10259 * Unconditional VM-exit.
10260 */
10261HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10262{
10263 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10264 return IEMExecVmxVmexitTripleFault(pVCpu);
10265}
10266
10267
10268/**
10269 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10270 */
10271HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10272{
10273 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10274
10275 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10276 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10277 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10278}
10279
10280
10281/**
10282 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10283 */
10284HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10285{
10286 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10287
10288 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10289 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10290 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10291}
10292
10293
10294/**
10295 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10296 * Unconditional VM-exit.
10297 */
10298HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10299{
10300 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10301
10302 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10303 | HMVMX_READ_EXIT_INSTR_LEN
10304 | HMVMX_READ_IDT_VECTORING_INFO
10305 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10306
10307 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10308 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10309 pVmxTransient->uIdtVectoringErrorCode);
10310 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10311}
10312
10313
10314/**
10315 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10316 */
10317HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10318{
10319 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10320
10321 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10322 {
10323 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10324 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10325 }
10326 return vmxHCExitHlt(pVCpu, pVmxTransient);
10327}
10328
10329
10330/**
10331 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10332 */
10333HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10334{
10335 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10336
10337 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10338 {
10339 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10340 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10341 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10342 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10343 }
10344 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10345}
10346
10347
10348/**
10349 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10350 */
10351HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10352{
10353 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10354
10355 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10356 {
10357 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10358 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10359 }
10360 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10361}
10362
10363
10364/**
10365 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10366 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10367 */
10368HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10369{
10370 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10371
10372 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10373 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10374
10375 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10376
10377 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10378 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10379 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10380
10381 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10382 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10383 u64VmcsField &= UINT64_C(0xffffffff);
10384
10385 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10386 {
10387 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10388 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10389 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10390 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10391 }
10392
10393 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10394 return vmxHCExitVmread(pVCpu, pVmxTransient);
10395 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10396}
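/*
 * Note: the intercept check above uses the VMCS field value taken from the second
 * register operand (iReg2), truncated to 32 bits when the guest isn't in long mode.
 * CPUMIsGuestVmxVmreadVmwriteInterceptSet decides whether the nested-guest intercepts
 * the access (e.g. via the VMREAD/VMWRITE bitmaps when VMCS shadowing is in use); only
 * non-intercepted accesses fall through to the regular vmxHCExitVmread/vmxHCExitVmwrite
 * handlers above.
 */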
10397
10398
10399/**
10400 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10401 */
10402HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10403{
10404 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10405
10406 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10407 {
10408 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10409 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10410 }
10411
10412 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10413}
10414
10415
10416/**
10417 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10418 * Conditional VM-exit.
10419 */
10420HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10421{
10422 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10423
10424 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10425 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10426
10427 VBOXSTRICTRC rcStrict;
10428 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10429 switch (uAccessType)
10430 {
10431 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10432 {
10433 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10434 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10435 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10436 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10437
10438 bool fIntercept;
10439 switch (iCrReg)
10440 {
10441 case 0:
10442 case 4:
10443 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10444 break;
10445
10446 case 3:
10447 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10448 break;
10449
10450 case 8:
10451 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10452 break;
10453
10454 default:
10455 fIntercept = false;
10456 break;
10457 }
10458 if (fIntercept)
10459 {
10460 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10461 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10462 }
10463 else
10464 {
10465 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10466 AssertRCReturn(rc, rc);
10467 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10468 }
10469 break;
10470 }
10471
10472 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10473 {
10474 /*
10475 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10476 * CR2 reads do not cause a VM-exit.
10477 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10478 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10479 */
10480 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10481 if ( iCrReg == 3
10482 || iCrReg == 8)
10483 {
10484 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10485 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10486 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10487 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10488 {
10489 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10490 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10491 }
10492 else
10493 {
10494 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10495 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10496 }
10497 }
10498 else
10499 {
10500 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10501 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10502 }
10503 break;
10504 }
10505
10506 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10507 {
10508 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10509 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10510 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10511 if ( (uGstHostMask & X86_CR0_TS)
10512 && (uReadShadow & X86_CR0_TS))
10513 {
10514 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10515 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10516 }
10517 else
10518 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10519 break;
10520 }
10521
10522 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10523 {
10524 RTGCPTR GCPtrEffDst;
10525 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10526 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10527 if (fMemOperand)
10528 {
10529 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10530 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10531 }
10532 else
10533 GCPtrEffDst = NIL_RTGCPTR;
10534
10535 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10536 {
10537 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10538 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10539 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10540 }
10541 else
10542 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10543 break;
10544 }
10545
10546 default:
10547 {
10548 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10549 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10550 }
10551 }
10552
10553 if (rcStrict == VINF_IEM_RAISED_XCPT)
10554 {
10555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10556 rcStrict = VINF_SUCCESS;
10557 }
10558 return rcStrict;
10559}
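/*
 * Quick reference for the CR-access intercept decisions above (a summary of the code,
 * not additional logic):
 *   - MOV to CR0/CR4:      per the nested-guest CR0/CR4 guest/host masks and read
 *                          shadows (CPUMIsGuestVmxMovToCr0Cr4InterceptSet).
 *   - MOV to CR3/CR8:      per the CR3-load exiting (and CR3 target values) and
 *                          CR8-load exiting controls.
 *   - MOV from CR3/CR8:    per the CR3-store / CR8-store exiting controls; CR0/CR4
 *                          reads never exit as the read shadow is used instead.
 *   - CLTS:                only when CR0.TS is set in both the CR0 guest/host mask and
 *                          the CR0 read shadow.
 *   - LMSW:                per CPUMIsGuestVmxLmswInterceptSet on the new MSW value,
 *                          with the guest-linear address supplied for memory operands.
 */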
10560
10561
10562/**
10563 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10564 * Conditional VM-exit.
10565 */
10566HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10567{
10568 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10569
10570 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10571 {
10572 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10573 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10574 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10575 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10576 }
10577 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10578}
10579
10580
10581/**
10582 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10583 * Conditional VM-exit.
10584 */
10585HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10586{
10587 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10588
10589 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10590
10591 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10592 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10593 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10594
10595 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10596 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10597 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10598 {
10599 /*
10600 * IN/OUT instruction:
10601 * - Provides VM-exit instruction length.
10602 *
10603 * INS/OUTS instruction:
10604 * - Provides VM-exit instruction length.
10605 * - Provides Guest-linear address.
10606 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10607 */
10608 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10609 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10610
10611 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10612 pVmxTransient->ExitInstrInfo.u = 0;
10613 pVmxTransient->uGuestLinearAddr = 0;
10614
10615 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10616 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10617 if (fIOString)
10618 {
10619 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10620 if (fVmxInsOutsInfo)
10621 {
10622 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10623 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10624 }
10625 }
10626
10627 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10628 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10629 }
10630 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10631}
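/*
 * Note on the I/O size decoding above: the exit qualification encodes the access size
 * as 0, 1 or 3 for 1, 2 and 4 byte accesses respectively (2 is not a valid encoding,
 * hence the AssertReturn and the zero entry in s_aIOSizes).  Whether the nested-guest
 * intercepts the port access is decided by CPUMIsGuestVmxIoInterceptSet before the
 * exit is reflected with IEMExecVmxVmexitInstrWithInfo.
 */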
10632
10633
10634/**
10635 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10636 */
10637HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10638{
10639 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10640
10641 uint32_t fMsrpm;
10642 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10643 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10644 else
10645 fMsrpm = VMXMSRPM_EXIT_RD;
10646
10647 if (fMsrpm & VMXMSRPM_EXIT_RD)
10648 {
10649 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10650 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10651 }
10652 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10653}
10654
10655
10656/**
10657 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10658 */
10659HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10660{
10661 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10662
10663 uint32_t fMsrpm;
10664 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10665 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10666 else
10667 fMsrpm = VMXMSRPM_EXIT_WR;
10668
10669 if (fMsrpm & VMXMSRPM_EXIT_WR)
10670 {
10671 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10672 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10673 }
10674 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10675}
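/*
 * Note: the RDMSR and WRMSR handlers above use the same scheme.  When the nested-guest
 * uses MSR bitmaps, the permission for the MSR in ECX is looked up with
 * CPUMGetVmxMsrPermission; otherwise every RDMSR/WRMSR is treated as intercepted
 * (VMXMSRPM_EXIT_RD / VMXMSRPM_EXIT_WR).  Intercepted accesses are reflected as
 * nested-guest VM-exits, the rest fall back to the regular vmxHCExitRdmsr and
 * vmxHCExitWrmsr handlers.
 */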
10676
10677
10678/**
10679 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10680 */
10681HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10682{
10683 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10684
10685 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10686 {
10687 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10688 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10689 }
10690 return vmxHCExitMwait(pVCpu, pVmxTransient);
10691}
10692
10693
10694/**
10695 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10696 * VM-exit.
10697 */
10698HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10699{
10700 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10701
10702 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10703 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10704 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10705 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10706}
10707
10708
10709/**
10710 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10711 */
10712HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10713{
10714 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10715
10716 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10717 {
10718 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10719 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10720 }
10721 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10722}
10723
10724
10725/**
10726 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10727 */
10728HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10729{
10730 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10731
10732 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10733 * PAUSE when executing a nested-guest? If it does not, we would not need
10734 * to check for the intercepts here. Just call VM-exit... */
10735
10736 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10737 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10738 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10739 {
10740 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10741 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10742 }
10743 return vmxHCExitPause(pVCpu, pVmxTransient);
10744}
10745
10746
10747/**
10748 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10749 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10750 */
10751HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10752{
10753 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10754
10755 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10756 {
10757 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10758 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10759 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10760 }
10761 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10762}
10763
10764
10765/**
10766 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10767 * VM-exit.
10768 */
10769HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10770{
10771 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10772
10773 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10774 | HMVMX_READ_EXIT_INSTR_LEN
10775 | HMVMX_READ_IDT_VECTORING_INFO
10776 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10777
10778 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10779
10780 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10781 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10782
10783 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10784 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10785 pVmxTransient->uIdtVectoringErrorCode);
10786 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10787}
10788
10789
10790/**
10791 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10792 * Conditional VM-exit.
10793 */
10794HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10795{
10796 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10797
10798 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10799 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10800 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10801}
10802
10803
10804/**
10805 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10806 * Conditional VM-exit.
10807 */
10808HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10809{
10810 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10811
10812 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10813 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10814 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10815}
10816
10817
10818/**
10819 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10820 */
10821HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10822{
10823 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10824
10825 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10826 {
10827 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10828 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10829 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10830 }
10831 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10832}
10833
10834
10835/**
10836 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10837 */
10838HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10839{
10840 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10841
10842 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10843 {
10844 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10845 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10846 }
10847 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10848}
10849
10850
10851/**
10852 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10853 */
10854HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10855{
10856 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10857
10858 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10859 {
10860 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10861 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10862 | HMVMX_READ_EXIT_INSTR_INFO
10863 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10864 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10865 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10866 }
10867 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10868}
10869
10870
10871/**
10872 * Nested-guest VM-exit handler for invalid-guest state
10873 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10874 */
10875HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10876{
10877 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10878
10879 /*
10880 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10881 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10882 * Handle it as an invalid-guest-state condition of the outer guest.
10883 *
10884 * When the fast path is implemented, this should be changed to cause the corresponding
10885 * nested-guest VM-exit.
10886 */
10887 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10888}
10889
10890
10891/**
10892 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10893 * and only provide the instruction length.
10894 *
10895 * Unconditional VM-exit.
10896 */
10897HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10898{
10899 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10900
10901#ifdef VBOX_STRICT
10902 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10903 switch (pVmxTransient->uExitReason)
10904 {
10905 case VMX_EXIT_ENCLS:
10906 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10907 break;
10908
10909 case VMX_EXIT_VMFUNC:
10910 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10911 break;
10912 }
10913#endif
10914
10915 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10916 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10917}
10918
10919
10920/**
10921 * Nested-guest VM-exit handler for instructions that provide instruction length as
10922 * well as more information.
10923 *
10924 * Unconditional VM-exit.
10925 */
10926HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10927{
10928 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10929
10930# ifdef VBOX_STRICT
10931 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10932 switch (pVmxTransient->uExitReason)
10933 {
10934 case VMX_EXIT_GDTR_IDTR_ACCESS:
10935 case VMX_EXIT_LDTR_TR_ACCESS:
10936 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10937 break;
10938
10939 case VMX_EXIT_RDRAND:
10940 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10941 break;
10942
10943 case VMX_EXIT_RDSEED:
10944 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10945 break;
10946
10947 case VMX_EXIT_XSAVES:
10948 case VMX_EXIT_XRSTORS:
10949 /** @todo NSTVMX: Verify XSS-bitmap. */
10950 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10951 break;
10952
10953 case VMX_EXIT_UMWAIT:
10954 case VMX_EXIT_TPAUSE:
10955 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10956 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10957 break;
10958
10959 case VMX_EXIT_LOADIWKEY:
10960 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10961 break;
10962 }
10963# endif
10964
10965 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10966 | HMVMX_READ_EXIT_INSTR_LEN
10967 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10968 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10969 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10970}
10971
10972# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10973
10974/**
10975 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10976 * Conditional VM-exit.
10977 */
10978HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10979{
10980 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10981 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10982
10983 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10984 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10985 {
10986 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10987 | HMVMX_READ_EXIT_INSTR_LEN
10988 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10989 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10990 | HMVMX_READ_IDT_VECTORING_INFO
10991 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10992 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10993 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10994 AssertRCReturn(rc, rc);
10995
10996 /*
10997 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10998 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10999 * dealing with the event becomes its responsibility and we'll clear the recovered event.
11000 */
11001 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
11002 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11003 { /*likely*/ }
11004 else
11005 {
11006 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
11007 return rcStrict;
11008 }
11009 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
11010
11011 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11012 uint64_t const uExitQual = pVmxTransient->uExitQual;
11013
11014 RTGCPTR GCPtrNestedFault;
11015 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
11016 if (fIsLinearAddrValid)
11017 {
11018 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
11019 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
11020 }
11021 else
11022 GCPtrNestedFault = 0;
11023
11024 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
11025 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
11026 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
11027 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
11028 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
11029
11030 PGMPTWALK Walk;
11031 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11032 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
11033 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
11034 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
11035 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
11036 if (RT_SUCCESS(rcStrict))
11037 {
11038 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
11039 {
11040 Assert(!fClearEventOnForward);
11041 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
11042 rcStrict = VINF_EM_RESCHEDULE_REM;
11043 }
11044 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
11045 return rcStrict;
11046 }
11047
11048 if (fClearEventOnForward)
11049 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
11050
11051 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11052 pVmxTransient->uIdtVectoringErrorCode);
11053 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
11054 {
11055 VMXVEXITINFO const ExitInfo
11056 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
11057 pVmxTransient->uExitQual,
11058 pVmxTransient->cbExitInstr,
11059 pVmxTransient->uGuestLinearAddr,
11060 pVmxTransient->uGuestPhysicalAddr);
11061 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
11062 }
11063
11064 AssertMsgReturn(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG,
11065 ("uErr=%#RX32 uExitQual=%#RX64 GCPhysNestedFault=%#RGp GCPtrNestedFault=%#RGv\n",
11066 (uint32_t)uErr, uExitQual, GCPhysNestedFault, GCPtrNestedFault),
11067 rcStrict);
11068 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11069 }
11070
11071 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
11072}
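/*
 * Note on the synthesized page-fault error code above: X86_TRAP_PF_ID is set for
 * instruction fetches, X86_TRAP_PF_RW for write accesses, and X86_TRAP_PF_P whenever
 * the EPT entry had any of the read/write/execute permission bits set (i.e. the
 * violation was a permission problem rather than a missing translation).  Depending on
 * the PGM walk result the access is then either handled here, reflected as a nested
 * EPT-violation VM-exit (PGM_WALKFAIL_EPT_VIOLATION) or reflected as an
 * EPT-misconfiguration VM-exit (PGM_WALKFAIL_EPT_MISCONFIG).
 */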
11073
11074
11075/**
11076 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11077 * Conditional VM-exit.
11078 */
11079HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11080{
11081 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11082 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
11083
11084 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11085 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
11086 {
11087 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
11088 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11089 AssertRCReturn(rc, rc);
11090
11091 PGMPTWALK Walk;
11092 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11093 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11094 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11095 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11096 0 /* GCPtrNestedFault */, &Walk);
11097 if (RT_SUCCESS(rcStrict))
11098 {
11099 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11100 return rcStrict;
11101 }
11102
11103 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11104 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11105 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11106
11107 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11108 pVmxTransient->uIdtVectoringErrorCode);
11109 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11110 }
11111
11112 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11113}
11114
11115# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11116
11117/** @} */
11118#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11119
11120
11121/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11122 * probes.
11123 *
11124 * The following few functions and the associated structure contain the bloat
11125 * necessary for providing detailed debug events and DTrace probes as well as
11126 * reliable host-side single stepping.
11127 * "subclassing" the normal execution loop and workers. We replace the loop
11128 * method completely and override selected helpers to add necessary adjustments
11129 * to their core operation.
11130 *
11131 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11132 * any performance for debug and analysis features.
11133 *
11134 * @{
11135 */
11136
11137/**
11138 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
11139 * in the debug run loop.
11140 */
11141typedef struct VMXRUNDBGSTATE
11142{
11143 /** The RIP we started executing at. This is for detecting that we stepped. */
11144 uint64_t uRipStart;
11145 /** The CS we started executing with. */
11146 uint16_t uCsStart;
11147
11148 /** Whether we've actually modified the 1st execution control field. */
11149 bool fModifiedProcCtls : 1;
11150 /** Whether we've actually modified the 2nd execution control field. */
11151 bool fModifiedProcCtls2 : 1;
11152 /** Whether we've actually modified the exception bitmap. */
11153 bool fModifiedXcptBitmap : 1;
11154
11155 /** Whether we desire the CR0 guest/host mask to be cleared. */
11156 bool fClearCr0Mask : 1;
11157 /** Whether we desire the CR4 guest/host mask to be cleared. */
11158 bool fClearCr4Mask : 1;
11159 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11160 uint32_t fCpe1Extra;
11161 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11162 uint32_t fCpe1Unwanted;
11163 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11164 uint32_t fCpe2Extra;
11165 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11166 uint32_t bmXcptExtra;
11167 /** The sequence number of the Dtrace provider settings the state was
11168 * configured against. */
11169 uint32_t uDtraceSettingsSeqNo;
11170 /** VM-exits to check (one bit per VM-exit). */
11171 uint32_t bmExitsToCheck[3];
11172
11173 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11174 uint32_t fProcCtlsInitial;
11175 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11176 uint32_t fProcCtls2Initial;
11177 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11178 uint32_t bmXcptInitial;
11179} VMXRUNDBGSTATE;
11180AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11181typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11182
11183
11184/**
11185 * Initializes the VMXRUNDBGSTATE structure.
11186 *
11187 * @param pVCpu The cross context virtual CPU structure of the
11188 * calling EMT.
11189 * @param pVmxTransient The VMX-transient structure.
11190 * @param pDbgState The debug state to initialize.
11191 */
11192static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11193{
11194 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11195 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11196
11197 pDbgState->fModifiedProcCtls = false;
11198 pDbgState->fModifiedProcCtls2 = false;
11199 pDbgState->fModifiedXcptBitmap = false;
11200 pDbgState->fClearCr0Mask = false;
11201 pDbgState->fClearCr4Mask = false;
11202 pDbgState->fCpe1Extra = 0;
11203 pDbgState->fCpe1Unwanted = 0;
11204 pDbgState->fCpe2Extra = 0;
11205 pDbgState->bmXcptExtra = 0;
11206 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11207 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11208 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11209}
11210
11211
11212/**
11213 * Updates the VMCS fields with changes requested by @a pDbgState.
11214 *
11215 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
11216 * immediately before executing guest code, i.e. when interrupts are disabled.
11217 * We don't check status codes here as we cannot easily assert or return in the
11218 * latter case.
11219 *
11220 * @param pVCpu The cross context virtual CPU structure.
11221 * @param pVmxTransient The VMX-transient structure.
11222 * @param pDbgState The debug state.
11223 */
11224static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11225{
11226 /*
11227 * Ensure desired flags in VMCS control fields are set.
11228 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11229 *
11230 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11231 * there should be no stale data in pCtx at this point.
11232 */
11233 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11234 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11235 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11236 {
11237 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11238 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11239 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11240 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11241 pDbgState->fModifiedProcCtls = true;
11242 }
11243
11244 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11245 {
11246 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11247 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11248 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11249 pDbgState->fModifiedProcCtls2 = true;
11250 }
11251
11252 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11253 {
11254 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11255 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11256 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11257 pDbgState->fModifiedXcptBitmap = true;
11258 }
11259
11260 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11261 {
11262 pVmcsInfo->u64Cr0Mask = 0;
11263 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11264 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11265 }
11266
11267 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11268 {
11269 pVmcsInfo->u64Cr4Mask = 0;
11270 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11271 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11272 }
11273
11274 NOREF(pVCpu);
11275}
11276
11277
11278/**
11279 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
11280 * re-entry next time around.
11281 *
11282 * @returns Strict VBox status code (i.e. informational status codes too).
11283 * @param pVCpu The cross context virtual CPU structure.
11284 * @param pVmxTransient The VMX-transient structure.
11285 * @param pDbgState The debug state.
11286 * @param rcStrict The return code from executing the guest using single
11287 * stepping.
11288 */
11289static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11290 VBOXSTRICTRC rcStrict)
11291{
11292 /*
11293 * Restore VM-exit control settings as we may not reenter this function the
11294 * next time around.
11295 */
11296 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11297
11298     /* We reload the initial value and trigger what recalculations we can the
11299        next time around. From the looks of things, that's all that's required atm. */
11300 if (pDbgState->fModifiedProcCtls)
11301 {
11302 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11303 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11304 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11305 AssertRC(rc2);
11306 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11307 }
11308
11309 /* We're currently the only ones messing with this one, so just restore the
11310 cached value and reload the field. */
11311 if ( pDbgState->fModifiedProcCtls2
11312 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11313 {
11314 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11315 AssertRC(rc2);
11316 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11317 }
11318
11319 /* If we've modified the exception bitmap, we restore it and trigger
11320 reloading and partial recalculation the next time around. */
11321 if (pDbgState->fModifiedXcptBitmap)
11322 {
11323 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11324 AssertRC(rc2);
11325 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11326 }
11327
11328 return rcStrict;
11329}
11330
11331
11332/**
11333 * Configures VM-exit controls for current DBGF and DTrace settings.
11334 *
11335 * This updates @a pDbgState and the VMCS execution control fields to reflect
11336 * the necessary VM-exits demanded by DBGF and DTrace.
11337 *
11338 * @param pVCpu The cross context virtual CPU structure.
11339 * @param pVmxTransient The VMX-transient structure. May update
11340 * fUpdatedTscOffsettingAndPreemptTimer.
11341 * @param pDbgState The debug state.
11342 */
11343static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11344{
11345#ifndef IN_NEM_DARWIN
11346 /*
11347     * Take down the dtrace settings sequence number so we can spot changes.
11348 */
11349 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11350 ASMCompilerBarrier();
11351#endif
11352
11353 /*
11354 * We'll rebuild most of the middle block of data members (holding the
11355 * current settings) as we go along here, so start by clearing it all.
11356 */
11357 pDbgState->bmXcptExtra = 0;
11358 pDbgState->fCpe1Extra = 0;
11359 pDbgState->fCpe1Unwanted = 0;
11360 pDbgState->fCpe2Extra = 0;
11361 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11362 pDbgState->bmExitsToCheck[i] = 0;
11363
11364 /*
11365 * Software interrupts (INT XXh) - no idea how to trigger these...
11366 */
11367 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11368 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11369 || VBOXVMM_INT_SOFTWARE_ENABLED())
11370 {
11371 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11372 }
11373
11374 /*
11375 * INT3 breakpoints - triggered by #BP exceptions.
11376 */
11377 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11378 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11379
11380 /*
11381 * Exception bitmap and XCPT events+probes.
11382 */
11383 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11384 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11385 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11386
11387 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11388 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11389 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11390 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11391 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11392 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11393 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11394 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11395 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11396 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11397 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11398 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11399 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11400 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11401 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11402 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11403 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11404 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11405
11406 if (pDbgState->bmXcptExtra)
11407 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11408
11409 /*
11410 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11411 *
11412 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11413 * So, when adding/changing/removing please don't forget to update it.
11414 *
11415     * Some of the macros are picking up local variables to save horizontal space
11416 * (being able to see it in a table is the lesser evil here).
11417 */
11418#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11419 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11420 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11421#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11422 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11423 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11424 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11425 } else do { } while (0)
11426#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11427 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11428 { \
11429 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11430 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11431 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11432 } else do { } while (0)
11433#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11434 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11435 { \
11436 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11437 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11438 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11439 } else do { } while (0)
11440#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11441 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11442 { \
11443 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11444 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11445 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11446 } else do { } while (0)
11447
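    /* To illustrate what the table below does: the INSTR_HALT entry,
       SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT),
       expands to roughly
            if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
                || VBOXVMM_INSTR_HALT_ENABLED())
            {
                pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
                ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
            }
       i.e. it forces the corresponding exiting control on and marks the exit
       reason so vmxHCRunDebugHandleExit will dispatch events/probes for it. */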
11448 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11449 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11450 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11451 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11452 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11453
11454 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11455 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11456 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11457 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11458 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11459 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11460 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11461 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11462 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11463 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11464 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11465 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11466 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11467 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11468 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11469 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11470 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11471 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11472 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11473 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11474 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11475 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11476 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11477 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11478 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11479 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11480 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11481 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11482 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11483 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11484 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11485 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11486 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11487 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11488 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11489 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11490
11491 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11492 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11493 {
11494 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11495 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11496 AssertRC(rc);
11497
11498#if 0 /** @todo fix me */
11499 pDbgState->fClearCr0Mask = true;
11500 pDbgState->fClearCr4Mask = true;
11501#endif
11502 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11503 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11504 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11505 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11506 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11507 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11508 require clearing here and in the loop if we start using it. */
11509 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11510 }
11511 else
11512 {
11513 if (pDbgState->fClearCr0Mask)
11514 {
11515 pDbgState->fClearCr0Mask = false;
11516 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11517 }
11518 if (pDbgState->fClearCr4Mask)
11519 {
11520 pDbgState->fClearCr4Mask = false;
11521 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11522 }
11523 }
11524 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11525 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11526
11527 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11528 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11529 {
11530 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11531 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11532 }
11533 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11534 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11535
11536 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11537 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11538 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11539 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11540 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11541 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11542 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11543 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11544#if 0 /** @todo too slow, fix handler. */
11545 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11546#endif
11547 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11548
11549 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11550 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11551 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11552 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11553 {
11554 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11555 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11556 }
11557 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11558 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11559 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11560 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11561
11562 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11563 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11564 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11565 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11566 {
11567 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11568 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11569 }
11570 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11571 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11572 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11573 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11574
11575 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11576 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11577 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11578 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11579 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11580 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11581 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11582 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11583 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11584 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11585 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11586 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11587 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11588 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11589 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11590 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11591 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11592 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11593 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11594     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11595 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11596 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11597
11598#undef IS_EITHER_ENABLED
11599#undef SET_ONLY_XBM_IF_EITHER_EN
11600#undef SET_CPE1_XBM_IF_EITHER_EN
11601#undef SET_CPEU_XBM_IF_EITHER_EN
11602#undef SET_CPE2_XBM_IF_EITHER_EN
11603
11604 /*
11605     * Sanitize the requested controls: mask them by what the CPU supports and enable the secondary controls when needed.
11606 */
11607 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11608 if (pDbgState->fCpe2Extra)
11609 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11610 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11611 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
11612#ifndef IN_NEM_DARWIN
11613 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11614 {
11615 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11616 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11617 }
11618#else
11619 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11620 {
11621 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11622 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11623 }
11624#endif
11625
11626 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11627 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11628 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11629 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11630}
11631
11632
11633/**
11634 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11635 * appropriate.
11636 *
11637 * The caller has checked the VM-exit against the
11638 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11639 * already, so we don't have to do that either.
11640 *
11641 * @returns Strict VBox status code (i.e. informational status codes too).
11642 * @param pVCpu The cross context virtual CPU structure.
11643 * @param pVmxTransient The VMX-transient structure.
11644 * @param uExitReason The VM-exit reason.
11645 *
11646 * @remarks The name of this function is displayed by dtrace, so keep it short
11647 * and to the point. No longer than 33 chars long, please.
11648 */
11649static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11650{
11651 /*
11652 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11653 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11654 *
11655 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11656 * does. Must add/change/remove both places. Same ordering, please.
11657 *
11658 * Added/removed events must also be reflected in the next section
11659 * where we dispatch dtrace events.
11660 */
11661 bool fDtrace1 = false;
11662 bool fDtrace2 = false;
11663 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11664 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11665 uint32_t uEventArg = 0;
11666#define SET_EXIT(a_EventSubName) \
11667 do { \
11668 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11669 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11670 } while (0)
11671#define SET_BOTH(a_EventSubName) \
11672 do { \
11673 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11674 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11675 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11676 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11677 } while (0)
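    /* For example, SET_BOTH(CPUID) in the switch below expands to
            enmEvent1 = DBGFEVENT_INSTR_CPUID;
            enmEvent2 = DBGFEVENT_EXIT_CPUID;
            fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
            fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       i.e. it selects both the instruction-level and the exit-level DBGF event
       types and records whether the matching dtrace probes are armed. */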
11678 switch (uExitReason)
11679 {
11680 case VMX_EXIT_MTF:
11681 return vmxHCExitMtf(pVCpu, pVmxTransient);
11682
11683 case VMX_EXIT_XCPT_OR_NMI:
11684 {
11685 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11686 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11687 {
11688 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11689 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11690 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11691 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11692 {
11693 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11694 {
11695 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11696 uEventArg = pVmxTransient->uExitIntErrorCode;
11697 }
11698 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11699 switch (enmEvent1)
11700 {
11701 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11702 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11703 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11704 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11705 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11706 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11707 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11708 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11709 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11710 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11711 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11712 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11713 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11714 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11715 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11716 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11717 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11718 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11719 default: break;
11720 }
11721 }
11722 else
11723 AssertFailed();
11724 break;
11725
11726 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11727 uEventArg = idxVector;
11728 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11729 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11730 break;
11731 }
11732 break;
11733 }
11734
11735 case VMX_EXIT_TRIPLE_FAULT:
11736 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11737 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11738 break;
11739 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11740 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11741 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11742 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11743 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11744
11745 /* Instruction specific VM-exits: */
11746 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11747 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11748 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11749 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11750 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11751 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11752 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11753 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11754 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11755 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11756 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11757 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11758 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11759 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11760 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11761 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11762 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11763 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11764 case VMX_EXIT_MOV_CRX:
11765 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11766 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11767 SET_BOTH(CRX_READ);
11768 else
11769 SET_BOTH(CRX_WRITE);
11770 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11771 break;
11772 case VMX_EXIT_MOV_DRX:
11773 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11774 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11775 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11776 SET_BOTH(DRX_READ);
11777 else
11778 SET_BOTH(DRX_WRITE);
11779 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11780 break;
11781 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11782 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11783 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11784 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11785 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11786 case VMX_EXIT_GDTR_IDTR_ACCESS:
11787 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11788 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11789 {
11790 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11791 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11792 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11793 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11794 }
11795 break;
11796
11797 case VMX_EXIT_LDTR_TR_ACCESS:
11798 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11799 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11800 {
11801 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11802 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11803 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11804 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11805 }
11806 break;
11807
11808 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11809 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11810 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11811 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11812 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11813 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11814 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11815 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11816 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11817 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11818 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11819
11820 /* Events that aren't relevant at this point. */
11821 case VMX_EXIT_EXT_INT:
11822 case VMX_EXIT_INT_WINDOW:
11823 case VMX_EXIT_NMI_WINDOW:
11824 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11825 case VMX_EXIT_PREEMPT_TIMER:
11826 case VMX_EXIT_IO_INSTR:
11827 break;
11828
11829 /* Errors and unexpected events. */
11830 case VMX_EXIT_INIT_SIGNAL:
11831 case VMX_EXIT_SIPI:
11832 case VMX_EXIT_IO_SMI:
11833 case VMX_EXIT_SMI:
11834 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11835 case VMX_EXIT_ERR_MSR_LOAD:
11836 case VMX_EXIT_ERR_MACHINE_CHECK:
11837 case VMX_EXIT_PML_FULL:
11838 case VMX_EXIT_VIRTUALIZED_EOI:
11839 break;
11840
11841 default:
11842 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11843 break;
11844 }
11845#undef SET_BOTH
11846#undef SET_EXIT
11847
11848 /*
11849 * Dtrace tracepoints go first. We do them here at once so we don't
11850 * have to copy the guest state saving and stuff a few dozen times.
11851 * Down side is that we've got to repeat the switch, though this time
11852 * we use enmEvent since the probes are a subset of what DBGF does.
11853 */
11854 if (fDtrace1 || fDtrace2)
11855 {
11856 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11857 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11858 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
11859 switch (enmEvent1)
11860 {
11861 /** @todo consider which extra parameters would be helpful for each probe. */
11862 case DBGFEVENT_END: break;
11863 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11864 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11865 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11866 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11867 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11868 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11869 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11870 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11871 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11872 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11873 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11874 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11875 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11876 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11877 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11878 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11879 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11880 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11881 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11882 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11883 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11884 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11885 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11886 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11887 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11888 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11889 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11890 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11891 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11892 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11893 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11894 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11895 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11896 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11897 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11898 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11899 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11900 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11901 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11902 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11903 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11904 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11905 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11906 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11907 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11908 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11909 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11910 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11911 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11912 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11913 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11914 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11915 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11916 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11917 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11918 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11919 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11920 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11921 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11922 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11923 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11924 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11925 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11926 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11927 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11928 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11929 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11930 }
11931 switch (enmEvent2)
11932 {
11933 /** @todo consider which extra parameters would be helpful for each probe. */
11934 case DBGFEVENT_END: break;
11935 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11936 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11937 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11938 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11939 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11940 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11941 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11942 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11943 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11944 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11945 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11946 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11947 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11948 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11949 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11950 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11951 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11952 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11953 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11954 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11955 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11956 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11957 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11958 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11959 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11960 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11961 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11962 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11963 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11964 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11965 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11966 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11967 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11968 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11969 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11970 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11971 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11972 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11973 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11974 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11975 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11976 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11977 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11978 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11979 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11980 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11981 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11982 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11983 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11984 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11985 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11986 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11987 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11988 }
11989 }
11990
11991 /*
11992     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11993 * the DBGF call will do a full check).
11994 *
11995 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11996     * Note! If we have two events, we prioritize the first, i.e. the instruction
11997 * one, in order to avoid event nesting.
11998 */
11999 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
12000 if ( enmEvent1 != DBGFEVENT_END
12001 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
12002 {
12003 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12004 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
12005 if (rcStrict != VINF_SUCCESS)
12006 return rcStrict;
12007 }
12008 else if ( enmEvent2 != DBGFEVENT_END
12009 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
12010 {
12011 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12012 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
12013 if (rcStrict != VINF_SUCCESS)
12014 return rcStrict;
12015 }
12016
12017 return VINF_SUCCESS;
12018}
12019
12020
12021/**
12022 * Single-stepping VM-exit filtering.
12023 *
12024 * This is preprocessing the VM-exits and deciding whether we've gotten far
12025 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
12026 * handling is performed.
12027 *
12028 * @returns Strict VBox status code (i.e. informational status codes too).
12029 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12030 * @param pVmxTransient The VMX-transient structure.
12031 * @param pDbgState The debug state.
12032 */
12033DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
12034{
12035 /*
12036 * Expensive (saves context) generic dtrace VM-exit probe.
12037 */
12038 uint32_t const uExitReason = pVmxTransient->uExitReason;
12039 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
12040 { /* more likely */ }
12041 else
12042 {
12043 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
12044 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12045 AssertRC(rc);
12046 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
12047 }
12048
12049#ifndef IN_NEM_DARWIN
12050 /*
12051 * Check for host NMI, just to get that out of the way.
12052 */
12053 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
12054 { /* normally likely */ }
12055 else
12056 {
12057 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
12058 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12059 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
12060 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
12061 }
12062#endif
12063
12064 /*
12065 * Check for single stepping event if we're stepping.
12066 */
12067 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
12068 {
12069 switch (uExitReason)
12070 {
12071 case VMX_EXIT_MTF:
12072 return vmxHCExitMtf(pVCpu, pVmxTransient);
12073
12074 /* Various events: */
12075 case VMX_EXIT_XCPT_OR_NMI:
12076 case VMX_EXIT_EXT_INT:
12077 case VMX_EXIT_TRIPLE_FAULT:
12078 case VMX_EXIT_INT_WINDOW:
12079 case VMX_EXIT_NMI_WINDOW:
12080 case VMX_EXIT_TASK_SWITCH:
12081 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12082 case VMX_EXIT_APIC_ACCESS:
12083 case VMX_EXIT_EPT_VIOLATION:
12084 case VMX_EXIT_EPT_MISCONFIG:
12085 case VMX_EXIT_PREEMPT_TIMER:
12086
12087 /* Instruction specific VM-exits: */
12088 case VMX_EXIT_CPUID:
12089 case VMX_EXIT_GETSEC:
12090 case VMX_EXIT_HLT:
12091 case VMX_EXIT_INVD:
12092 case VMX_EXIT_INVLPG:
12093 case VMX_EXIT_RDPMC:
12094 case VMX_EXIT_RDTSC:
12095 case VMX_EXIT_RSM:
12096 case VMX_EXIT_VMCALL:
12097 case VMX_EXIT_VMCLEAR:
12098 case VMX_EXIT_VMLAUNCH:
12099 case VMX_EXIT_VMPTRLD:
12100 case VMX_EXIT_VMPTRST:
12101 case VMX_EXIT_VMREAD:
12102 case VMX_EXIT_VMRESUME:
12103 case VMX_EXIT_VMWRITE:
12104 case VMX_EXIT_VMXOFF:
12105 case VMX_EXIT_VMXON:
12106 case VMX_EXIT_MOV_CRX:
12107 case VMX_EXIT_MOV_DRX:
12108 case VMX_EXIT_IO_INSTR:
12109 case VMX_EXIT_RDMSR:
12110 case VMX_EXIT_WRMSR:
12111 case VMX_EXIT_MWAIT:
12112 case VMX_EXIT_MONITOR:
12113 case VMX_EXIT_PAUSE:
12114 case VMX_EXIT_GDTR_IDTR_ACCESS:
12115 case VMX_EXIT_LDTR_TR_ACCESS:
12116 case VMX_EXIT_INVEPT:
12117 case VMX_EXIT_RDTSCP:
12118 case VMX_EXIT_INVVPID:
12119 case VMX_EXIT_WBINVD:
12120 case VMX_EXIT_XSETBV:
12121 case VMX_EXIT_RDRAND:
12122 case VMX_EXIT_INVPCID:
12123 case VMX_EXIT_VMFUNC:
12124 case VMX_EXIT_RDSEED:
12125 case VMX_EXIT_XSAVES:
12126 case VMX_EXIT_XRSTORS:
12127 {
12128 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12129 AssertRCReturn(rc, rc);
12130 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12131 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12132 return VINF_EM_DBG_STEPPED;
12133 break;
12134 }
12135
12136 /* Errors and unexpected events: */
12137 case VMX_EXIT_INIT_SIGNAL:
12138 case VMX_EXIT_SIPI:
12139 case VMX_EXIT_IO_SMI:
12140 case VMX_EXIT_SMI:
12141 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12142 case VMX_EXIT_ERR_MSR_LOAD:
12143 case VMX_EXIT_ERR_MACHINE_CHECK:
12144 case VMX_EXIT_PML_FULL:
12145 case VMX_EXIT_VIRTUALIZED_EOI:
12146 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12147 break;
12148
12149 default:
12150 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12151 break;
12152 }
12153 }
12154
12155 /*
12156 * Check for debugger event breakpoints and dtrace probes.
12157 */
12158 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12159 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12160 {
12161 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12162 if (rcStrict != VINF_SUCCESS)
12163 return rcStrict;
12164 }
12165
12166 /*
12167 * Normal processing.
12168 */
12169#ifdef HMVMX_USE_FUNCTION_TABLE
12170 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12171#else
12172 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12173#endif
12174}
12175
12176/** @} */