VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@96686

Last change on this file since 96686 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 492.6 KB
1/* $Id: VMXAllTemplate.cpp.h 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41
42/** Use the function table. */
43#define HMVMX_USE_FUNCTION_TABLE
44
45/** Determine which tagged-TLB flush handler to use. */
46#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
47#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
48#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
49#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
50
51/** Assert that all the given fields have been read from the VMCS. */
52#ifdef VBOX_STRICT
53# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
54 do { \
55 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
56 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
57 } while (0)
58#else
59# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
60#endif
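/*
 * Usage sketch for HMVMX_ASSERT_READ (kept preprocessed-out): a VM-exit
 * handler first pulls the fields it needs into the transient structure via
 * the vmxHCReadXxxVmcs helpers defined further down, and can then assert that
 * the corresponding HMVMX_READ_XXX bits are indeed set.
 */
#if 0
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
#endif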
61
62/**
63 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
64 * guest using hardware-assisted VMX.
65 *
66 * This excludes state like GPRs (other than RSP) which are always
67 * swapped and restored across the world-switch, and also registers like the
68 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
69 */
70#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
71 | CPUMCTX_EXTRN_RFLAGS \
72 | CPUMCTX_EXTRN_RSP \
73 | CPUMCTX_EXTRN_SREG_MASK \
74 | CPUMCTX_EXTRN_TABLE_MASK \
75 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
76 | CPUMCTX_EXTRN_SYSCALL_MSRS \
77 | CPUMCTX_EXTRN_SYSENTER_MSRS \
78 | CPUMCTX_EXTRN_TSC_AUX \
79 | CPUMCTX_EXTRN_OTHER_MSRS \
80 | CPUMCTX_EXTRN_CR0 \
81 | CPUMCTX_EXTRN_CR3 \
82 | CPUMCTX_EXTRN_CR4 \
83 | CPUMCTX_EXTRN_DR7 \
84 | CPUMCTX_EXTRN_HWVIRT \
85 | CPUMCTX_EXTRN_INHIBIT_INT \
86 | CPUMCTX_EXTRN_INHIBIT_NMI)
87
88/**
89 * Exception bitmap mask for real-mode guests (real-on-v86).
90 *
91 * We need to intercept all exceptions manually, except:
92 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
93 * deadlocking due to bugs in Intel CPUs.
94 * - \#PF, which need not be intercepted even in real-mode when we have nested
95 * paging support.
96 */
97#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
98 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
99 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
100 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
101 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
102 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
103 | RT_BIT(X86_XCPT_XF))
104
105/** Maximum VM-instruction error number. */
106#define HMVMX_INSTR_ERROR_MAX 28
107
108/** Profiling macro. */
109#ifdef HM_PROFILE_EXIT_DISPATCH
110# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
111# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
112#else
113# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
114# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
115#endif
116
117#ifndef IN_NEM_DARWIN
118/** Assert that preemption is disabled or covered by thread-context hooks. */
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
120 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
121
122/** Assert that we haven't migrated CPUs when thread-context hooks are not
123 * used. */
124# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
125 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
126 ("Illegal migration! Entered on CPU %u Current %u\n", \
127 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
128#else
129# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
130# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
131#endif
132
133/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
134 * context. */
135#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
136 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
137 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
138
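/*
 * Usage sketch for HMVMX_CPUMCTX_ASSERT (kept preprocessed-out): code about to
 * touch a piece of guest state asserts that the corresponding CPUMCTX_EXTRN_XXX
 * bits have already been imported, i.e. are clear in fExtrn; HMVMX_CPUMCTX_EXTRN_ALL
 * above is the full subset this code keeps in sync.
 */
#if 0
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);        /* CR0 must have been imported... */
    uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;     /* ...before it is read here. */
    HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);  /* Or assert the whole HM subset at once. */
#endif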
139/** Log the VM-exit reason with an easily visible marker to identify it in a
140 * potential sea of logging data. */
141#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
142 do { \
143 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
144 HMGetVmxExitName(a_uExitReason))); \
145 } while (0) \
146
147
148/*********************************************************************************************************************************
149* Structures and Typedefs *
150*********************************************************************************************************************************/
151/**
152 * Memory operand read or write access.
153 */
154typedef enum VMXMEMACCESS
155{
156 VMXMEMACCESS_READ = 0,
157 VMXMEMACCESS_WRITE = 1
158} VMXMEMACCESS;
159
160
161/**
162 * VMX VM-exit handler.
163 *
164 * @returns Strict VBox status code (i.e. informational status codes too).
165 * @param pVCpu The cross context virtual CPU structure.
166 * @param pVmxTransient The VMX-transient structure.
167 */
168#ifndef HMVMX_USE_FUNCTION_TABLE
169typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
170#else
171typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
172/** Pointer to VM-exit handler. */
173typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
174#endif
175
176/**
177 * VMX VM-exit handler, non-strict status code.
178 *
179 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
180 *
181 * @returns VBox status code, no informational status code returned.
182 * @param pVCpu The cross context virtual CPU structure.
183 * @param pVmxTransient The VMX-transient structure.
184 *
185 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
186 * use of that status code will be replaced with VINF_EM_SOMETHING
187 * later when switching over to IEM.
188 */
189#ifndef HMVMX_USE_FUNCTION_TABLE
190typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191#else
192typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
193#endif
194
195
196/*********************************************************************************************************************************
197* Internal Functions *
198*********************************************************************************************************************************/
199#ifndef HMVMX_USE_FUNCTION_TABLE
200DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
201# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
202# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
203#else
204# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
205# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
206#endif
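/*
 * Shape of a VM-exit handler built from the declaration macros above (sketch
 * only, kept preprocessed-out; vmxHCExitExample is a made-up name, the real
 * handlers are declared and defined later in this file).
 */
#if 0
HMVMX_EXIT_DECL vmxHCExitExample(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);   /* Lazily fetch what this exit needs. */
    /* ... emulate the exiting instruction, then advance RIP by pVmxTransient->cbExitInstr ... */
    return VINF_SUCCESS;
}
#endif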
207#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
208DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
209#endif
210
211static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
212
213/** @name VM-exit handler prototypes.
214 * @{
215 */
216static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
217static FNVMXEXITHANDLER vmxHCExitExtInt;
218static FNVMXEXITHANDLER vmxHCExitTripleFault;
219static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
220static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
221static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
222static FNVMXEXITHANDLER vmxHCExitCpuid;
223static FNVMXEXITHANDLER vmxHCExitGetsec;
224static FNVMXEXITHANDLER vmxHCExitHlt;
225static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
226static FNVMXEXITHANDLER vmxHCExitInvlpg;
227static FNVMXEXITHANDLER vmxHCExitRdpmc;
228static FNVMXEXITHANDLER vmxHCExitVmcall;
229#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
230static FNVMXEXITHANDLER vmxHCExitVmclear;
231static FNVMXEXITHANDLER vmxHCExitVmlaunch;
232static FNVMXEXITHANDLER vmxHCExitVmptrld;
233static FNVMXEXITHANDLER vmxHCExitVmptrst;
234static FNVMXEXITHANDLER vmxHCExitVmread;
235static FNVMXEXITHANDLER vmxHCExitVmresume;
236static FNVMXEXITHANDLER vmxHCExitVmwrite;
237static FNVMXEXITHANDLER vmxHCExitVmxoff;
238static FNVMXEXITHANDLER vmxHCExitVmxon;
239static FNVMXEXITHANDLER vmxHCExitInvvpid;
240# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
241static FNVMXEXITHANDLER vmxHCExitInvept;
242# endif
243#endif
244static FNVMXEXITHANDLER vmxHCExitRdtsc;
245static FNVMXEXITHANDLER vmxHCExitMovCRx;
246static FNVMXEXITHANDLER vmxHCExitMovDRx;
247static FNVMXEXITHANDLER vmxHCExitIoInstr;
248static FNVMXEXITHANDLER vmxHCExitRdmsr;
249static FNVMXEXITHANDLER vmxHCExitWrmsr;
250static FNVMXEXITHANDLER vmxHCExitMwait;
251static FNVMXEXITHANDLER vmxHCExitMtf;
252static FNVMXEXITHANDLER vmxHCExitMonitor;
253static FNVMXEXITHANDLER vmxHCExitPause;
254static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
255static FNVMXEXITHANDLER vmxHCExitApicAccess;
256static FNVMXEXITHANDLER vmxHCExitEptViolation;
257static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
258static FNVMXEXITHANDLER vmxHCExitRdtscp;
259static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
260static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
261static FNVMXEXITHANDLER vmxHCExitXsetbv;
262static FNVMXEXITHANDLER vmxHCExitInvpcid;
263static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
264static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
265static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
266/** @} */
267
268#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
269/** @name Nested-guest VM-exit handler prototypes.
270 * @{
271 */
272static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
273static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
274static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
275static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
276static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
277static FNVMXEXITHANDLER vmxHCExitHltNested;
278static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
279static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
280static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
281static FNVMXEXITHANDLER vmxHCExitRdtscNested;
282static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
283static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
284static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
285static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
286static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
287static FNVMXEXITHANDLER vmxHCExitMwaitNested;
288static FNVMXEXITHANDLER vmxHCExitMtfNested;
289static FNVMXEXITHANDLER vmxHCExitMonitorNested;
290static FNVMXEXITHANDLER vmxHCExitPauseNested;
291static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
292static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
293static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
294static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
295static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
296static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
297static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
298static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
299static FNVMXEXITHANDLER vmxHCExitInstrNested;
300static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
301# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
302static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
303static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
304# endif
305/** @} */
306#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
307
308
309/*********************************************************************************************************************************
310* Global Variables *
311*********************************************************************************************************************************/
312#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
313/**
314 * Array of all VMCS fields.
315 * Any fields added to the VT-x spec. should be added here.
316 *
317 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
318 * of nested-guests.
319 */
320static const uint32_t g_aVmcsFields[] =
321{
322 /* 16-bit control fields. */
323 VMX_VMCS16_VPID,
324 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
325 VMX_VMCS16_EPTP_INDEX,
326
327 /* 16-bit guest-state fields. */
328 VMX_VMCS16_GUEST_ES_SEL,
329 VMX_VMCS16_GUEST_CS_SEL,
330 VMX_VMCS16_GUEST_SS_SEL,
331 VMX_VMCS16_GUEST_DS_SEL,
332 VMX_VMCS16_GUEST_FS_SEL,
333 VMX_VMCS16_GUEST_GS_SEL,
334 VMX_VMCS16_GUEST_LDTR_SEL,
335 VMX_VMCS16_GUEST_TR_SEL,
336 VMX_VMCS16_GUEST_INTR_STATUS,
337 VMX_VMCS16_GUEST_PML_INDEX,
338
339 /* 16-bit host-state fields. */
340 VMX_VMCS16_HOST_ES_SEL,
341 VMX_VMCS16_HOST_CS_SEL,
342 VMX_VMCS16_HOST_SS_SEL,
343 VMX_VMCS16_HOST_DS_SEL,
344 VMX_VMCS16_HOST_FS_SEL,
345 VMX_VMCS16_HOST_GS_SEL,
346 VMX_VMCS16_HOST_TR_SEL,
347
348 /* 64-bit control fields. */
349 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
350 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
351 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
352 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
353 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
354 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
355 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
356 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
357 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
358 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
359 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
360 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
361 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
362 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
363 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
364 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
365 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
366 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
367 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
368 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
369 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
370 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
371 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
372 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
373 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
374 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
375 VMX_VMCS64_CTRL_EPTP_FULL,
376 VMX_VMCS64_CTRL_EPTP_HIGH,
377 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
378 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
379 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
380 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
385 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
386 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
387 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
388 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
389 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
390 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
391 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
392 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
393 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
394 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
396 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
397 VMX_VMCS64_CTRL_SPPTP_FULL,
398 VMX_VMCS64_CTRL_SPPTP_HIGH,
399 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
400 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
401 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
402 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
403 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
404 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
405
406 /* 64-bit read-only data fields. */
407 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
408 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
409
410 /* 64-bit guest-state fields. */
411 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
412 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
413 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
414 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
415 VMX_VMCS64_GUEST_PAT_FULL,
416 VMX_VMCS64_GUEST_PAT_HIGH,
417 VMX_VMCS64_GUEST_EFER_FULL,
418 VMX_VMCS64_GUEST_EFER_HIGH,
419 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
420 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
421 VMX_VMCS64_GUEST_PDPTE0_FULL,
422 VMX_VMCS64_GUEST_PDPTE0_HIGH,
423 VMX_VMCS64_GUEST_PDPTE1_FULL,
424 VMX_VMCS64_GUEST_PDPTE1_HIGH,
425 VMX_VMCS64_GUEST_PDPTE2_FULL,
426 VMX_VMCS64_GUEST_PDPTE2_HIGH,
427 VMX_VMCS64_GUEST_PDPTE3_FULL,
428 VMX_VMCS64_GUEST_PDPTE3_HIGH,
429 VMX_VMCS64_GUEST_BNDCFGS_FULL,
430 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
431 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
432 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
433 VMX_VMCS64_GUEST_PKRS_FULL,
434 VMX_VMCS64_GUEST_PKRS_HIGH,
435
436 /* 64-bit host-state fields. */
437 VMX_VMCS64_HOST_PAT_FULL,
438 VMX_VMCS64_HOST_PAT_HIGH,
439 VMX_VMCS64_HOST_EFER_FULL,
440 VMX_VMCS64_HOST_EFER_HIGH,
441 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
442 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
443 VMX_VMCS64_HOST_PKRS_FULL,
444 VMX_VMCS64_HOST_PKRS_HIGH,
445
446 /* 32-bit control fields. */
447 VMX_VMCS32_CTRL_PIN_EXEC,
448 VMX_VMCS32_CTRL_PROC_EXEC,
449 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
450 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
451 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
452 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
453 VMX_VMCS32_CTRL_EXIT,
454 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
455 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
456 VMX_VMCS32_CTRL_ENTRY,
457 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
458 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
459 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
460 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
461 VMX_VMCS32_CTRL_TPR_THRESHOLD,
462 VMX_VMCS32_CTRL_PROC_EXEC2,
463 VMX_VMCS32_CTRL_PLE_GAP,
464 VMX_VMCS32_CTRL_PLE_WINDOW,
465
466 /* 32-bit read-only fields. */
467 VMX_VMCS32_RO_VM_INSTR_ERROR,
468 VMX_VMCS32_RO_EXIT_REASON,
469 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
470 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
471 VMX_VMCS32_RO_IDT_VECTORING_INFO,
472 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
473 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
474 VMX_VMCS32_RO_EXIT_INSTR_INFO,
475
476 /* 32-bit guest-state fields. */
477 VMX_VMCS32_GUEST_ES_LIMIT,
478 VMX_VMCS32_GUEST_CS_LIMIT,
479 VMX_VMCS32_GUEST_SS_LIMIT,
480 VMX_VMCS32_GUEST_DS_LIMIT,
481 VMX_VMCS32_GUEST_FS_LIMIT,
482 VMX_VMCS32_GUEST_GS_LIMIT,
483 VMX_VMCS32_GUEST_LDTR_LIMIT,
484 VMX_VMCS32_GUEST_TR_LIMIT,
485 VMX_VMCS32_GUEST_GDTR_LIMIT,
486 VMX_VMCS32_GUEST_IDTR_LIMIT,
487 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
488 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
489 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
490 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
491 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_INT_STATE,
496 VMX_VMCS32_GUEST_ACTIVITY_STATE,
497 VMX_VMCS32_GUEST_SMBASE,
498 VMX_VMCS32_GUEST_SYSENTER_CS,
499 VMX_VMCS32_PREEMPT_TIMER_VALUE,
500
501 /* 32-bit host-state fields. */
502 VMX_VMCS32_HOST_SYSENTER_CS,
503
504 /* Natural-width control fields. */
505 VMX_VMCS_CTRL_CR0_MASK,
506 VMX_VMCS_CTRL_CR4_MASK,
507 VMX_VMCS_CTRL_CR0_READ_SHADOW,
508 VMX_VMCS_CTRL_CR4_READ_SHADOW,
509 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
510 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
511 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
512 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
513
514 /* Natural-width read-only data fields. */
515 VMX_VMCS_RO_EXIT_QUALIFICATION,
516 VMX_VMCS_RO_IO_RCX,
517 VMX_VMCS_RO_IO_RSI,
518 VMX_VMCS_RO_IO_RDI,
519 VMX_VMCS_RO_IO_RIP,
520 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
521
522 /* Natural-width guest-state fields */
523 VMX_VMCS_GUEST_CR0,
524 VMX_VMCS_GUEST_CR3,
525 VMX_VMCS_GUEST_CR4,
526 VMX_VMCS_GUEST_ES_BASE,
527 VMX_VMCS_GUEST_CS_BASE,
528 VMX_VMCS_GUEST_SS_BASE,
529 VMX_VMCS_GUEST_DS_BASE,
530 VMX_VMCS_GUEST_FS_BASE,
531 VMX_VMCS_GUEST_GS_BASE,
532 VMX_VMCS_GUEST_LDTR_BASE,
533 VMX_VMCS_GUEST_TR_BASE,
534 VMX_VMCS_GUEST_GDTR_BASE,
535 VMX_VMCS_GUEST_IDTR_BASE,
536 VMX_VMCS_GUEST_DR7,
537 VMX_VMCS_GUEST_RSP,
538 VMX_VMCS_GUEST_RIP,
539 VMX_VMCS_GUEST_RFLAGS,
540 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
541 VMX_VMCS_GUEST_SYSENTER_ESP,
542 VMX_VMCS_GUEST_SYSENTER_EIP,
543 VMX_VMCS_GUEST_S_CET,
544 VMX_VMCS_GUEST_SSP,
545 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
546
547 /* Natural-width host-state fields */
548 VMX_VMCS_HOST_CR0,
549 VMX_VMCS_HOST_CR3,
550 VMX_VMCS_HOST_CR4,
551 VMX_VMCS_HOST_FS_BASE,
552 VMX_VMCS_HOST_GS_BASE,
553 VMX_VMCS_HOST_TR_BASE,
554 VMX_VMCS_HOST_GDTR_BASE,
555 VMX_VMCS_HOST_IDTR_BASE,
556 VMX_VMCS_HOST_SYSENTER_ESP,
557 VMX_VMCS_HOST_SYSENTER_EIP,
558 VMX_VMCS_HOST_RSP,
559 VMX_VMCS_HOST_RIP,
560 VMX_VMCS_HOST_S_CET,
561 VMX_VMCS_HOST_SSP,
562 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
563};
564#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
565
566#ifdef VBOX_STRICT
567static const uint32_t g_aVmcsSegBase[] =
568{
569 VMX_VMCS_GUEST_ES_BASE,
570 VMX_VMCS_GUEST_CS_BASE,
571 VMX_VMCS_GUEST_SS_BASE,
572 VMX_VMCS_GUEST_DS_BASE,
573 VMX_VMCS_GUEST_FS_BASE,
574 VMX_VMCS_GUEST_GS_BASE
575};
576static const uint32_t g_aVmcsSegSel[] =
577{
578 VMX_VMCS16_GUEST_ES_SEL,
579 VMX_VMCS16_GUEST_CS_SEL,
580 VMX_VMCS16_GUEST_SS_SEL,
581 VMX_VMCS16_GUEST_DS_SEL,
582 VMX_VMCS16_GUEST_FS_SEL,
583 VMX_VMCS16_GUEST_GS_SEL
584};
585static const uint32_t g_aVmcsSegLimit[] =
586{
587 VMX_VMCS32_GUEST_ES_LIMIT,
588 VMX_VMCS32_GUEST_CS_LIMIT,
589 VMX_VMCS32_GUEST_SS_LIMIT,
590 VMX_VMCS32_GUEST_DS_LIMIT,
591 VMX_VMCS32_GUEST_FS_LIMIT,
592 VMX_VMCS32_GUEST_GS_LIMIT
593};
594static const uint32_t g_aVmcsSegAttr[] =
595{
596 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
597 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
598 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
599 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
600 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
601 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
602};
603AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
604AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
605AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
606AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
607#endif /* VBOX_STRICT */
608
609#ifdef HMVMX_USE_FUNCTION_TABLE
610/**
611 * VMX_EXIT dispatch table.
612 */
613static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
614{
615 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
616 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
617 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
618 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
619 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
620 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
621 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
622 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
623 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
624 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
625 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
626 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
627 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
628 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
629 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
630 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
631 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
632 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
633 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
635 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
636 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
637 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
638 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
639 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
640 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
641 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
642 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
643 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
644#else
645 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
646 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
647 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
648 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
649 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
650 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
651 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
652 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
653 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
654#endif
655 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
656 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
657 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
658 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
659 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
660 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
661 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
662 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
663 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
664 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
665 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
666 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
667 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
668 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
669 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
670 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
671 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
672 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
673 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
674 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
675 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
676 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
677#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
678 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
679#else
680 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
681#endif
682 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
683 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
684#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
685 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
686#else
687 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
688#endif
689 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
690 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
691 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
692 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
693 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
694 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
695 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
696 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
697 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
698 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
699 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
700 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
701 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
702 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
703 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
704 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
705};
706#endif /* HMVMX_USE_FUNCTION_TABLE */
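/*
 * Dispatch sketch (kept preprocessed-out): with HMVMX_USE_FUNCTION_TABLE the
 * basic exit reason indexes g_aVMExitHandlers after a bounds check against
 * VMX_EXIT_MAX (assuming uExitReason already holds the basic exit reason).
 */
#if 0
    uint32_t const uExitReason = pVmxTransient->uExitReason;
    AssertReturn(uExitReason <= VMX_EXIT_MAX, VERR_VMX_UNEXPECTED_EXIT);
    VBOXSTRICTRC rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
#endif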
707
708#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
709static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
710{
711 /* 0 */ "(Not Used)",
712 /* 1 */ "VMCALL executed in VMX root operation.",
713 /* 2 */ "VMCLEAR with invalid physical address.",
714 /* 3 */ "VMCLEAR with VMXON pointer.",
715 /* 4 */ "VMLAUNCH with non-clear VMCS.",
716 /* 5 */ "VMRESUME with non-launched VMCS.",
717 /* 6 */ "VMRESUME after VMXOFF.",
718 /* 7 */ "VM-entry with invalid control fields.",
719 /* 8 */ "VM-entry with invalid host state fields.",
720 /* 9 */ "VMPTRLD with invalid physical address.",
721 /* 10 */ "VMPTRLD with VMXON pointer.",
722 /* 11 */ "VMPTRLD with incorrect revision identifier.",
723 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
724 /* 13 */ "VMWRITE to read-only VMCS component.",
725 /* 14 */ "(Not Used)",
726 /* 15 */ "VMXON executed in VMX root operation.",
727 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
728 /* 17 */ "VM-entry with non-launched executive VMCS.",
729 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
730 /* 19 */ "VMCALL with non-clear VMCS.",
731 /* 20 */ "VMCALL with invalid VM-exit control fields.",
732 /* 21 */ "(Not Used)",
733 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
734 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
735 /* 24 */ "VMCALL with invalid SMM-monitor features.",
736 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
737 /* 26 */ "VM-entry with events blocked by MOV SS.",
738 /* 27 */ "(Not Used)",
739 /* 28 */ "Invalid operand to INVEPT/INVVPID."
740};
741#endif /* VBOX_STRICT && LOG_ENABLED */
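/*
 * Lookup sketch for g_apszVmxInstrErrors (kept preprocessed-out): the
 * VM-instruction error read from VMX_VMCS32_RO_VM_INSTR_ERROR is range-checked
 * against HMVMX_INSTR_ERROR_MAX before indexing the table.
 */
#if 0
    uint32_t uInstrError;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
    AssertRC(rc);
    Log4(("VM-instruction error %u: %s\n", uInstrError,
          uInstrError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uInstrError] : "Unknown"));
#endif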
742
743
744/**
745 * Gets the CR0 guest/host mask.
746 *
747 * These bits typically do not change through the lifetime of a VM. Any bit set in
748 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
749 * by the guest.
750 *
751 * @returns The CR0 guest/host mask.
752 * @param pVCpu The cross context virtual CPU structure.
753 */
754static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
755{
756 /*
757 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
758 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
759 *
760 * Furthermore, modifications to any bits that are reserved/unspecified currently
761 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
762 * when future CPUs specify and use currently reserved/unspecified bits.
763 */
764 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
765 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
766 * and @bugref{6944}. */
767 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
768 return ( X86_CR0_PE
769 | X86_CR0_NE
770 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
771 | X86_CR0_PG
772 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
773}
774
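/*
 * The mask returned above is what typically lands in the CR0 guest/host mask
 * VMCS field (sketch only, kept preprocessed-out; assumes the including code
 * provides VMX_VMCS_WRITE_NW alongside the VMX_VMCS_READ_NW used later here).
 */
#if 0
    uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
    AssertRC(rc);
#endif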
775
776/**
777 * Gets the CR4 guest/host mask.
778 *
779 * These bits typically do not change through the lifetime of a VM. Any bit set in
780 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
781 * by the guest.
782 *
783 * @returns The CR4 guest/host mask.
784 * @param pVCpu The cross context virtual CPU structure.
785 */
786static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
787{
788 /*
789 * We construct a mask of all CR4 bits that the guest can modify without causing
790 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
791 * a VM-exit when the guest attempts to modify them when executing using
792 * hardware-assisted VMX.
793 *
794 * When a feature is not exposed to the guest (and may be present on the host),
795 * we want to intercept guest modifications to the bit so we can emulate proper
796 * behavior (e.g., #GP).
797 *
798 * Furthermore, only modifications to those bits that don't require immediate
799 * emulation are allowed. E.g., PCIDE is excluded because the behavior
800 * depends on CR3 which might not always be the guest value while executing
801 * using hardware-assisted VMX.
802 */
803 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
804 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
805#ifdef IN_NEM_DARWIN
806 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
807#endif
808 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
809
810 /*
811 * Paranoia.
812 * Ensure features exposed to the guest are present on the host.
813 */
814 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
815#ifdef IN_NEM_DARWIN
816 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
817#endif
818 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
819
820 uint64_t const fGstMask = X86_CR4_PVI
821 | X86_CR4_TSD
822 | X86_CR4_DE
823 | X86_CR4_MCE
824 | X86_CR4_PCE
825 | X86_CR4_OSXMMEEXCPT
826 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
827#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
828 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
829 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
830#endif
831 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
832 return ~fGstMask;
833}
834
835
836/**
837 * Adds one or more exceptions to the exception bitmap and commits it to the current
838 * VMCS.
839 *
840 * @param pVCpu The cross context virtual CPU structure.
841 * @param pVmxTransient The VMX-transient structure.
842 * @param uXcptMask The exception(s) to add.
843 */
844static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
845{
846 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
847 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
848 if ((uXcptBitmap & uXcptMask) != uXcptMask)
849 {
850 uXcptBitmap |= uXcptMask;
851 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
852 AssertRC(rc);
853 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
854 }
855}
856
857
858/**
859 * Adds an exception to the exception bitmap and commits it to the current VMCS.
860 *
861 * @param pVCpu The cross context virtual CPU structure.
862 * @param pVmxTransient The VMX-transient structure.
863 * @param uXcpt The exception to add.
864 */
865static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
866{
867 Assert(uXcpt <= X86_XCPT_LAST);
868 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
869}
870
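/*
 * Usage sketch (kept preprocessed-out): adding a single intercept versus a
 * whole set of intercepts; both end up committed to the exception bitmap in
 * the current VMCS by the helpers above.
 */
#if 0
    vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(X86_XCPT_DE) | RT_BIT_32(X86_XCPT_UD));
#endif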
871
872/**
873 * Removes one or more exceptions from the exception bitmap and commits it to the
874 * current VMCS.
875 *
876 * This takes care of not removing the exception intercept if a nested-guest
877 * requires the exception to be intercepted.
878 *
879 * @returns VBox status code.
880 * @param pVCpu The cross context virtual CPU structure.
881 * @param pVmxTransient The VMX-transient structure.
882 * @param uXcptMask The exception(s) to remove.
883 */
884static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
885{
886 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
887 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
888 if (u32XcptBitmap & uXcptMask)
889 {
890#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
891 if (!pVmxTransient->fIsNestedGuest)
892 { /* likely */ }
893 else
894 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
895#endif
896#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
897 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
898 | RT_BIT(X86_XCPT_DE)
899 | RT_BIT(X86_XCPT_NM)
900 | RT_BIT(X86_XCPT_TS)
901 | RT_BIT(X86_XCPT_UD)
902 | RT_BIT(X86_XCPT_NP)
903 | RT_BIT(X86_XCPT_SS)
904 | RT_BIT(X86_XCPT_GP)
905 | RT_BIT(X86_XCPT_PF)
906 | RT_BIT(X86_XCPT_MF));
907#elif defined(HMVMX_ALWAYS_TRAP_PF)
908 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
909#endif
910 if (uXcptMask)
911 {
912 /* Validate we are not removing any essential exception intercepts. */
913#ifndef IN_NEM_DARWIN
914 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
915#else
916 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
917#endif
918 NOREF(pVCpu);
919 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
920 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
921
922 /* Remove it from the exception bitmap. */
923 u32XcptBitmap &= ~uXcptMask;
924
925 /* Commit and update the cache if necessary. */
926 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
927 {
928 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
929 AssertRC(rc);
930 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
931 }
932 }
933 }
934 return VINF_SUCCESS;
935}
936
937
938/**
939 * Removes an exception from the exception bitmap and commits it to the current
940 * VMCS.
941 *
942 * @returns VBox status code.
943 * @param pVCpu The cross context virtual CPU structure.
944 * @param pVmxTransient The VMX-transient structure.
945 * @param uXcpt The exception to remove.
946 */
947static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
948{
949 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
950}
951
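/*
 * Usage sketch (kept preprocessed-out): dropping the #PF intercept when nested
 * paging makes it unnecessary; the helper above keeps the intercept if a
 * nested-guest still wants it or an HMVMX_ALWAYS_TRAP_XXX build option is set.
 */
#if 0
    int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_PF);
    AssertRC(rc);
#endif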
952
953#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
954/**
955 * Loads the shadow VMCS specified by the VMCS info. object.
956 *
957 * @returns VBox status code.
958 * @param pVmcsInfo The VMCS info. object.
959 *
960 * @remarks Can be called with interrupts disabled.
961 */
962static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
963{
964 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
965 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
966
967 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
968 if (RT_SUCCESS(rc))
969 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
970 return rc;
971}
972
973
974/**
975 * Clears the shadow VMCS specified by the VMCS info. object.
976 *
977 * @returns VBox status code.
978 * @param pVmcsInfo The VMCS info. object.
979 *
980 * @remarks Can be called with interrupts disabled.
981 */
982static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
983{
984 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
985 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
986
987 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
988 if (RT_SUCCESS(rc))
989 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
990 return rc;
991}
992
993
994/**
995 * Switches from and to the specified VMCSes.
996 *
997 * @returns VBox status code.
998 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
999 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1000 *
1001 * @remarks Called with interrupts disabled.
1002 */
1003static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1004{
1005 /*
1006 * Clear the VMCS we are switching out if it has not already been cleared.
1007 * This will sync any CPU internal data back to the VMCS.
1008 */
1009 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1010 {
1011 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1012 if (RT_SUCCESS(rc))
1013 {
1014 /*
1015 * The shadow VMCS, if any, would not be active at this point since we
1016 * would have cleared it while importing the virtual hardware-virtualization
1017 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1018 * clear the shadow VMCS here, just assert for safety.
1019 */
1020 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1021 }
1022 else
1023 return rc;
1024 }
1025
1026 /*
1027 * Clear the VMCS we are switching to if it has not already been cleared.
1028 * This will initialize the VMCS launch state to "clear" required for loading it.
1029 *
1030 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1031 */
1032 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1033 {
1034 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1035 if (RT_SUCCESS(rc))
1036 { /* likely */ }
1037 else
1038 return rc;
1039 }
1040
1041 /*
1042 * Finally, load the VMCS we are switching to.
1043 */
1044 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1045}
1046
1047
1048/**
1049 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1050 * caller.
1051 *
1052 * @returns VBox status code.
1053 * @param pVCpu The cross context virtual CPU structure.
1054 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1055 * true) or guest VMCS (pass false).
1056 */
1057static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1058{
1059 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1060 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1061
1062 PVMXVMCSINFO pVmcsInfoFrom;
1063 PVMXVMCSINFO pVmcsInfoTo;
1064 if (fSwitchToNstGstVmcs)
1065 {
1066 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1067 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1068 }
1069 else
1070 {
1071 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1072 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1073 }
1074
1075 /*
1076 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1077 * preemption hook code path acquires the current VMCS.
1078 */
1079 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1080
1081 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1082 if (RT_SUCCESS(rc))
1083 {
1084 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1085 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1086
1087 /*
1088 * If we are switching to a VMCS that was executed on a different host CPU or was
1089 * never executed before, flag that we need to export the host state before executing
1090 * guest/nested-guest code using hardware-assisted VMX.
1091 *
1092 * This could probably be done in a preemptible context since the preemption hook
1093 * will flag the necessary change in host context. However, since preemption is
1094 * already disabled and to avoid making assumptions about host specific code in
1095 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1096 * disabled.
1097 */
1098 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1099 { /* likely */ }
1100 else
1101 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1102
1103 ASMSetFlags(fEFlags);
1104
1105 /*
1106 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1107 * flag that we need to update the host MSR values there. Even if we decide in the
1108 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1109 * if its content differs, we would have to update the host MSRs anyway.
1110 */
1111 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1112 }
1113 else
1114 ASMSetFlags(fEFlags);
1115 return rc;
1116}
1117#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1118
1119
1120#ifdef VBOX_STRICT
1121/**
1122 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1123 * transient structure.
1124 *
1125 * @param pVCpu The cross context virtual CPU structure.
1126 * @param pVmxTransient The VMX-transient structure.
1127 */
1128DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1129{
1130 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1131 AssertRC(rc);
1132}
1133
1134
1135/**
1136 * Reads the VM-entry exception error code field from the VMCS into
1137 * the VMX transient structure.
1138 *
1139 * @param pVCpu The cross context virtual CPU structure.
1140 * @param pVmxTransient The VMX-transient structure.
1141 */
1142DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1143{
1144 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1145 AssertRC(rc);
1146}
1147
1148
1149/**
1150 * Reads the VM-entry instruction length field from the VMCS into
1151 * the VMX transient structure.
1152 *
1153 * @param pVCpu The cross context virtual CPU structure.
1154 * @param pVmxTransient The VMX-transient structure.
1155 */
1156DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1157{
1158 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1159 AssertRC(rc);
1160}
1161#endif /* VBOX_STRICT */
1162
1163
1164/**
1165 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1166 * transient structure.
1167 *
1168 * @param pVCpu The cross context virtual CPU structure.
1169 * @param pVmxTransient The VMX-transient structure.
1170 */
1171DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1172{
1173 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1174 {
1175 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1176 AssertRC(rc);
1177 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1178 }
1179}
1180
1181
1182/**
1183 * Reads the VM-exit interruption error code from the VMCS into the VMX
1184 * transient structure.
1185 *
1186 * @param pVCpu The cross context virtual CPU structure.
1187 * @param pVmxTransient The VMX-transient structure.
1188 */
1189DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1190{
1191 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1192 {
1193 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1194 AssertRC(rc);
1195 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1196 }
1197}
1198
1199
1200/**
1201 * Reads the VM-exit instruction length field from the VMCS into the VMX
1202 * transient structure.
1203 *
1204 * @param pVCpu The cross context virtual CPU structure.
1205 * @param pVmxTransient The VMX-transient structure.
1206 */
1207DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1208{
1209 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1210 {
1211 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1212 AssertRC(rc);
1213 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1214 }
1215}
1216
1217
1218/**
1219 * Reads the VM-exit instruction-information field from the VMCS into
1220 * the VMX transient structure.
1221 *
1222 * @param pVCpu The cross context virtual CPU structure.
1223 * @param pVmxTransient The VMX-transient structure.
1224 */
1225DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1226{
1227 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1228 {
1229 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1230 AssertRC(rc);
1231 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1232 }
1233}
1234
1235
1236/**
1237 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1238 *
1239 * @param pVCpu The cross context virtual CPU structure.
1240 * @param pVmxTransient The VMX-transient structure.
1241 */
1242DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1245 {
1246 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1247 AssertRC(rc);
1248 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1249 }
1250}
1251
1252
1253/**
1254 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1255 *
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param pVmxTransient The VMX-transient structure.
1258 */
1259DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1260{
1261 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1262 {
1263 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1264 AssertRC(rc);
1265 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1266 }
1267}
1268
1269
1270/**
1271 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1272 *
1273 * @param pVCpu The cross context virtual CPU structure.
1274 * @param pVmxTransient The VMX-transient structure.
1275 */
1276DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1277{
1278 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1279 {
1280 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1281 AssertRC(rc);
1282 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1283 }
1284}
1285
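/*
 * Typical lazy-read pattern (kept preprocessed-out): an EPT-violation handler,
 * for instance, needs the exit qualification and the guest-physical address;
 * the helpers above fetch each field at most once per exit thanks to
 * fVmcsFieldsRead.
 */
#if 0
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
    uint64_t const uExitQual   = pVmxTransient->uExitQual;
    uint64_t const GCPhysFault = pVmxTransient->uGuestPhysicalAddr;
    /* ... resolve the fault using uExitQual and GCPhysFault ... */
#endif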
1286#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1287/**
1288 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1289 * structure.
1290 *
1291 * @param pVCpu The cross context virtual CPU structure.
1292 * @param pVmxTransient The VMX-transient structure.
1293 */
1294DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1295{
1296 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1297 {
1298 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1299 AssertRC(rc);
1300 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1301 }
1302}
1303#endif
1304
1305/**
1306 * Reads the IDT-vectoring information field from the VMCS into the VMX
1307 * transient structure.
1308 *
1309 * @param pVCpu The cross context virtual CPU structure.
1310 * @param pVmxTransient The VMX-transient structure.
1311 *
1312 * @remarks No-long-jump zone!!!
1313 */
1314DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1315{
1316 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1317 {
1318 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1319 AssertRC(rc);
1320 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1321 }
1322}
1323
1324
1325/**
1326 * Reads the IDT-vectoring error code from the VMCS into the VMX
1327 * transient structure.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 * @param pVmxTransient The VMX-transient structure.
1331 */
1332DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1333{
1334 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1335 {
1336 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1337 AssertRC(rc);
1338 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1339 }
1340}
1341
1342#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1343/**
1344 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1345 *
1346 * @param pVCpu The cross context virtual CPU structure.
1347 * @param pVmxTransient The VMX-transient structure.
1348 */
1349static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1350{
1351 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1352 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1353 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1354 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1355 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1356 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1358 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1359 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1360 AssertRC(rc);
1361 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1362 | HMVMX_READ_EXIT_INSTR_LEN
1363 | HMVMX_READ_EXIT_INSTR_INFO
1364 | HMVMX_READ_IDT_VECTORING_INFO
1365 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1366 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1367 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1368 | HMVMX_READ_GUEST_LINEAR_ADDR
1369 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1370}
1371#endif
1372
1373/**
1374 * Verifies that our cached values of the VMCS fields are all consistent with
1375 * what's actually present in the VMCS.
1376 *
1377 * @returns VBox status code.
1378 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1379 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1380 * VMCS content. HMCPU error-field is
1381 * updated, see VMX_VCI_XXX.
1382 * @param pVCpu The cross context virtual CPU structure.
1383 * @param pVmcsInfo The VMCS info. object.
1384 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1385 */
1386static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1387{
1388 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1389
1390 uint32_t u32Val;
1391 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1394 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1399 AssertRC(rc);
1400 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1401 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1402 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1403 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1404
1405 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1406 AssertRC(rc);
1407 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1408 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1409 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1410 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1411
1412 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1413 AssertRC(rc);
1414 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1415 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1416 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1417 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1418
1419 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1420 {
1421 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1422 AssertRC(rc);
1423 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1424 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1425 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1426 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1427 }
1428
1429 uint64_t u64Val;
1430 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1431 {
1432 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1433 AssertRC(rc);
1434 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1435 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1436 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1437 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1438 }
1439
1440 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1441 AssertRC(rc);
1442 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1443 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1444 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1445 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1446
1447 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1448 AssertRC(rc);
1449 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1450 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1451 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1452 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1453
1454 NOREF(pcszVmcs);
1455 return VINF_SUCCESS;
1456}
1457
1458
1459/**
1460 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1461 * VMCS.
1462 *
1463 * This is typically required when the guest changes paging mode.
1464 *
1465 * @returns VBox status code.
1466 * @param pVCpu The cross context virtual CPU structure.
1467 * @param pVmxTransient The VMX-transient structure.
1468 *
1469 * @remarks Requires EFER.
1470 * @remarks No-long-jump zone!!!
1471 */
1472static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1473{
1474 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1475 {
1476 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1477 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1478
1479 /*
1480 * VM-entry controls.
1481 */
1482 {
1483 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1484 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1485
1486 /*
1487 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1488 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1489 *
1490 * For nested-guests, this is a mandatory VM-entry control. It's also
1491 * required because we do not want to leak host bits to the nested-guest.
1492 */
1493 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1494
1495 /*
1496 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1497 *
1498 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1499 * required to get the nested-guest working with hardware-assisted VMX execution.
1500 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1501 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1502 * here rather than while merging the guest VMCS controls.
1503 */
1504 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1505 {
1506 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1507 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1508 }
1509 else
1510 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1511
1512 /*
1513 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1514 *
1515 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1516 * regardless of whether the nested-guest VMCS specifies it because we are free to
1517 * load whatever MSRs we require and we do not need to modify the guest visible copy
1518 * of the VM-entry MSR load area.
1519 */
1520 if ( g_fHmVmxSupportsVmcsEfer
1521#ifndef IN_NEM_DARWIN
1522 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1523#endif
1524 )
1525 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1526 else
1527 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1528
1529 /*
1530 * The following should -not- be set (since we're not in SMM mode):
1531 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1532 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1533 */
1534
1535 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1536 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1537
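            /*
             * Note on the check below: fVal started as allowed0 (bits that must be 1) and has
             * only had optional features OR'ed in, while fZap is allowed1 (bits that may be 1).
             * Hence (fVal & fZap) == fVal holds exactly when every bit we want to set is
             * actually supported by the CPU; e.g. setting VMX_ENTRY_CTLS_LOAD_EFER_MSR on a
             * CPU whose allowed1 mask lacks it would make the check fail.
             */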
1538 if ((fVal & fZap) == fVal)
1539 { /* likely */ }
1540 else
1541 {
1542 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1543 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1544 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1545 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1546 }
1547
1548 /* Commit it to the VMCS. */
1549 if (pVmcsInfo->u32EntryCtls != fVal)
1550 {
1551 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1552 AssertRC(rc);
1553 pVmcsInfo->u32EntryCtls = fVal;
1554 }
1555 }
1556
1557 /*
1558 * VM-exit controls.
1559 */
1560 {
1561 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1562 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1563
1564 /*
1565 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1566 * supported the 1-setting of this bit.
1567 *
1568 * For nested-guests, we set the "save debug controls" as the converse
1569 * "load debug controls" is mandatory for nested-guests anyway.
1570 */
1571 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1572
1573 /*
1574 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1575 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1576 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1577 * vmxHCExportHostMsrs().
1578 *
1579 * For nested-guests, we always set this bit as we do not support 32-bit
1580 * hosts.
1581 */
1582 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1583
1584#ifndef IN_NEM_DARWIN
1585 /*
1586 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1587 *
1588 * For nested-guests, we should use the "save IA32_EFER" control if we also
1589 * used the "load IA32_EFER" control while exporting VM-entry controls.
1590 */
1591 if ( g_fHmVmxSupportsVmcsEfer
1592 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1593 {
1594 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1595 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1596 }
1597#endif
1598
1599 /*
1600 * Enable saving of the VMX-preemption timer value on VM-exit.
1601 * For nested-guests, currently not exposed/used.
1602 */
1603 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1604 * the timer value. */
1605 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1606 {
1607 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1608 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1609 }
1610
1611 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1612 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1613
1614 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1615 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1616 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1617
1618 if ((fVal & fZap) == fVal)
1619 { /* likely */ }
1620 else
1621 {
1622 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1623 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1624 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1625 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1626 }
1627
1628 /* Commit it to the VMCS. */
1629 if (pVmcsInfo->u32ExitCtls != fVal)
1630 {
1631 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1632 AssertRC(rc);
1633 pVmcsInfo->u32ExitCtls = fVal;
1634 }
1635 }
1636
1637 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1638 }
1639 return VINF_SUCCESS;
1640}
1641
1642
1643/**
1644 * Sets the TPR threshold in the VMCS.
1645 *
1646 * @param pVCpu The cross context virtual CPU structure.
1647 * @param pVmcsInfo The VMCS info. object.
1648 * @param u32TprThreshold The TPR threshold (task-priority class only).
1649 */
1650DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1651{
1652 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1653 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1654 RT_NOREF(pVmcsInfo);
1655 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1656 AssertRC(rc);
1657}
1658
1659
1660/**
1661 * Exports the guest APIC TPR state into the VMCS.
1662 *
1663 * @param pVCpu The cross context virtual CPU structure.
1664 * @param pVmxTransient The VMX-transient structure.
1665 *
1666 * @remarks No-long-jump zone!!!
1667 */
1668static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1669{
1670 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1671 {
1672 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1673
1674 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1675 if (!pVmxTransient->fIsNestedGuest)
1676 {
1677 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1678 && APICIsEnabled(pVCpu))
1679 {
1680 /*
1681 * Setup TPR shadowing.
1682 */
1683 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1684 {
1685 bool fPendingIntr = false;
1686 uint8_t u8Tpr = 0;
1687 uint8_t u8PendingIntr = 0;
1688 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1689 AssertRC(rc);
1690
1691 /*
1692 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1693 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1694 * priority of the pending interrupt so we can deliver the interrupt. If there
1695 * are no interrupts pending, set threshold to 0 to not cause any
1696 * TPR-below-threshold VM-exits.
1697 */
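                    /*
                     * Worked example: a pending vector of 0x61 has priority class 6; with a guest
                     * TPR of 0x80 (class 8) the interrupt is masked, so the threshold becomes 6
                     * and a TPR-below-threshold VM-exit fires once the guest lowers its TPR class
                     * below 6, at which point the pending interrupt can be delivered.
                     */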
1698 uint32_t u32TprThreshold = 0;
1699 if (fPendingIntr)
1700 {
1701 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1702 (which is the Task-Priority Class). */
1703 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1704 const uint8_t u8TprPriority = u8Tpr >> 4;
1705 if (u8PendingPriority <= u8TprPriority)
1706 u32TprThreshold = u8PendingPriority;
1707 }
1708
1709 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1710 }
1711 }
1712 }
1713 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1714 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1715 }
1716}
1717
1718
1719/**
1720 * Gets the guest interruptibility-state and updates related force-flags.
1721 *
1722 * @returns Guest's interruptibility-state.
1723 * @param pVCpu The cross context virtual CPU structure.
1724 *
1725 * @remarks No-long-jump zone!!!
1726 */
1727static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1728{
1729 /*
1730 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1731 */
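    /*
     * Background: STI (when EFLAGS.IF was previously clear) and MOV SS / POP SS block maskable
     * interrupts until the following instruction has completed. EMGetInhibitInterruptsPC() below
     * returns the RIP recorded when the inhibition was set up; if we have since moved past it,
     * the shadow no longer applies and the force-flag can be cleared.
     */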
1732 uint32_t fIntrState = 0;
1733 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1734 {
1735 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1736 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1737
1738 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1739 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1740 {
1741 if (pCtx->eflags.Bits.u1IF)
1742 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1743 else
1744 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1745 }
1746 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1747 {
1748 /*
1749 * We can clear the inhibit force flag as even if we go back to the recompiler
1750 * without executing guest code in VT-x, the flag's condition to be cleared is
1751 * met and thus the cleared state is correct.
1752 */
1753 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1754 }
1755 }
1756
1757 /*
1758 * Check if we should inhibit NMI delivery.
1759 */
1760 if (CPUMIsGuestNmiBlocking(pVCpu))
1761 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1762
1763 /*
1764 * Validate.
1765 */
1766#ifdef VBOX_STRICT
1767 /* We don't support block-by-SMI yet.*/
1768 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1769
1770 /* Block-by-STI must not be set when interrupts are disabled. */
1771 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1772 {
1773 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1774 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1775 }
1776#endif
1777
1778 return fIntrState;
1779}
1780
1781
1782/**
1783 * Exports the exception intercepts required for guest execution in the VMCS.
1784 *
1785 * @param pVCpu The cross context virtual CPU structure.
1786 * @param pVmxTransient The VMX-transient structure.
1787 *
1788 * @remarks No-long-jump zone!!!
1789 */
1790static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1791{
1792 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1793 {
1794 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1795 if ( !pVmxTransient->fIsNestedGuest
1796 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1797 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1798 else
1799 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1800
1801 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1802 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1803 }
1804}
1805
1806
1807/**
1808 * Exports the guest's RIP into the guest-state area in the VMCS.
1809 *
1810 * @param pVCpu The cross context virtual CPU structure.
1811 *
1812 * @remarks No-long-jump zone!!!
1813 */
1814static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1815{
1816 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1817 {
1818 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1819
1820 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1821 AssertRC(rc);
1822
1823 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1824 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1825 }
1826}
1827
1828
1829/**
1830 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1831 *
1832 * @param pVCpu The cross context virtual CPU structure.
1833 * @param pVmxTransient The VMX-transient structure.
1834 *
1835 * @remarks No-long-jump zone!!!
1836 */
1837static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1838{
1839 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1840 {
1841 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1842
1843 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1844 Let us assert it as such and use 32-bit VMWRITE. */
1845 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1846 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1847 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1848 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1849
1850#ifndef IN_NEM_DARWIN
1851 /*
1852 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1853 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1854 * can run the real-mode guest code under Virtual 8086 mode.
1855 */
1856 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1857 if (pVmcsInfo->RealMode.fRealOnV86Active)
1858 {
1859 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1860 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1861 Assert(!pVmxTransient->fIsNestedGuest);
1862 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1863 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1864 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1865 }
1866#else
1867 RT_NOREF(pVmxTransient);
1868#endif
1869
1870 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1871 AssertRC(rc);
1872
1873 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1874 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1875 }
1876}
1877
1878
1879#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1880/**
1881 * Copies the nested-guest VMCS to the shadow VMCS.
1882 *
1883 * @returns VBox status code.
1884 * @param pVCpu The cross context virtual CPU structure.
1885 * @param pVmcsInfo The VMCS info. object.
1886 *
1887 * @remarks No-long-jump zone!!!
1888 */
1889static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1890{
1891 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1892 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1893
1894 /*
1895 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1896 * current VMCS, as we may try saving guest lazy MSRs.
1897 *
1898 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1899 * calling the import VMCS code which is currently performing the guest MSR reads
1900 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1901 * and the rest of the VMX leave session machinery.
1902 */
1903 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1904
1905 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1906 if (RT_SUCCESS(rc))
1907 {
1908 /*
1909 * Copy all guest read/write VMCS fields.
1910 *
1911 * We don't check for VMWRITE failures here for performance reasons and
1912 * because they are not expected to fail, barring irrecoverable conditions
1913 * like hardware errors.
1914 */
1915 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1916 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1917 {
1918 uint64_t u64Val;
1919 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1920 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1921 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1922 }
1923
1924 /*
1925 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1926 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1927 */
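        /* VMX_MISC_VMWRITE_ALL reports that VMWRITE may target any supported VMCS field,
           including fields that are otherwise read-only, which is what makes copying the
           read-only fields into the shadow VMCS possible here. */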
1928 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1929 {
1930 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1931 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1932 {
1933 uint64_t u64Val;
1934 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1935 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1936 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1937 }
1938 }
1939
1940 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1941 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1942 }
1943
1944 ASMSetFlags(fEFlags);
1945 return rc;
1946}
1947
1948
1949/**
1950 * Copies the shadow VMCS to the nested-guest VMCS.
1951 *
1952 * @returns VBox status code.
1953 * @param pVCpu The cross context virtual CPU structure.
1954 * @param pVmcsInfo The VMCS info. object.
1955 *
1956 * @remarks Called with interrupts disabled.
1957 */
1958static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1959{
1960 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1961 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1962 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1963
1964 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1965 if (RT_SUCCESS(rc))
1966 {
1967 /*
1968 * Copy guest read/write fields from the shadow VMCS.
1969 * Guest read-only fields cannot be modified, so no need to copy them.
1970 *
1971 * We don't check for VMREAD failures here for performance reasons and
1972 * because they are not expected to fail, barring irrecoverable conditions
1973 * like hardware errors.
1974 */
1975 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1976 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1977 {
1978 uint64_t u64Val;
1979 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1980 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1981 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1982 }
1983
1984 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1985 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1986 }
1987 return rc;
1988}
1989
1990
1991/**
1992 * Enables VMCS shadowing for the given VMCS info. object.
1993 *
1994 * @param pVCpu The cross context virtual CPU structure.
1995 * @param pVmcsInfo The VMCS info. object.
1996 *
1997 * @remarks No-long-jump zone!!!
1998 */
1999static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2000{
2001 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2002 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2003 {
2004 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2005 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2010 Log4Func(("Enabled\n"));
2011 }
2012}
2013
2014
2015/**
2016 * Disables VMCS shadowing for the given VMCS info. object.
2017 *
2018 * @param pVCpu The cross context virtual CPU structure.
2019 * @param pVmcsInfo The VMCS info. object.
2020 *
2021 * @remarks No-long-jump zone!!!
2022 */
2023static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2024{
2025 /*
2026 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2027 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2028 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2029 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2030 *
2031 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2032 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2033 */
2034 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2035 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2036 {
2037 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2038 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2039 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2040 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2041 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2042 Log4Func(("Disabled\n"));
2043 }
2044}
2045#endif
2046
2047
2048/**
2049 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2050 *
2051 * The guest FPU state is always pre-loaded hence we don't need to bother about
2052 * sharing FPU related CR0 bits between the guest and host.
2053 *
2054 * @returns VBox status code.
2055 * @param pVCpu The cross context virtual CPU structure.
2056 * @param pVmxTransient The VMX-transient structure.
2057 *
2058 * @remarks No-long-jump zone!!!
2059 */
2060static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2061{
2062 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2063 {
2064 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2065 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2066
2067 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2068 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2069 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2070 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2071 else
2072 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
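        /*
         * On typical hardware IA32_VMX_CR0_FIXED0 is 0x80000021 (PG, NE and PE must be 1) and
         * IA32_VMX_CR0_FIXED1 is 0xffffffff, so with unrestricted guest execution only the
         * PE/PG requirements get relaxed above while NE stays mandatory.
         */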
2073
2074 if (!pVmxTransient->fIsNestedGuest)
2075 {
2076 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2077 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2078 uint64_t const u64ShadowCr0 = u64GuestCr0;
2079 Assert(!RT_HI_U32(u64GuestCr0));
2080
2081 /*
2082 * Setup VT-x's view of the guest CR0.
2083 */
2084 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2085 if (VM_IS_VMX_NESTED_PAGING(pVM))
2086 {
2087#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2088 if (CPUMIsGuestPagingEnabled(pVCpu))
2089 {
2090 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2091 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2092 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2093 }
2094 else
2095 {
2096 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2097 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2098 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2099 }
2100
2101 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2102 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2103 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2104#endif
2105 }
2106 else
2107 {
2108 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2109 u64GuestCr0 |= X86_CR0_WP;
2110 }
2111
2112 /*
2113 * Guest FPU bits.
2114 *
2115 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2116 * using CR0.TS.
2117 *
2118 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2119 * set on the first CPUs to support VT-x, and makes no mention of any relaxation for UX in the VM-entry checks.
2120 */
2121 u64GuestCr0 |= X86_CR0_NE;
2122
2123 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2124 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2125
2126 /*
2127 * Update exception intercepts.
2128 */
2129 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2130#ifndef IN_NEM_DARWIN
2131 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2132 {
2133 Assert(PDMVmmDevHeapIsEnabled(pVM));
2134 Assert(pVM->hm.s.vmx.pRealModeTSS);
2135 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2136 }
2137 else
2138#endif
2139 {
2140 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2141 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2142 if (fInterceptMF)
2143 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2144 }
2145
2146 /* Additional intercepts for debugging, define these yourself explicitly. */
2147#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2148 uXcptBitmap |= 0
2149 | RT_BIT(X86_XCPT_BP)
2150 | RT_BIT(X86_XCPT_DE)
2151 | RT_BIT(X86_XCPT_NM)
2152 | RT_BIT(X86_XCPT_TS)
2153 | RT_BIT(X86_XCPT_UD)
2154 | RT_BIT(X86_XCPT_NP)
2155 | RT_BIT(X86_XCPT_SS)
2156 | RT_BIT(X86_XCPT_GP)
2157 | RT_BIT(X86_XCPT_PF)
2158 | RT_BIT(X86_XCPT_MF)
2159 ;
2160#elif defined(HMVMX_ALWAYS_TRAP_PF)
2161 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2162#endif
2163 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2164 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2165 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2166 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2167 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2168
2169 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2170 u64GuestCr0 |= fSetCr0;
2171 u64GuestCr0 &= fZapCr0;
2172 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2173
2174 /* Commit the CR0 and related fields to the guest VMCS. */
2175 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2176 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2177 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2178 {
2179 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2180 AssertRC(rc);
2181 }
2182 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2183 {
2184 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2185 AssertRC(rc);
2186 }
2187
2188 /* Update our caches. */
2189 pVmcsInfo->u32ProcCtls = uProcCtls;
2190 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2191
2192 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2193 }
2194 else
2195 {
2196 /*
2197 * With nested-guests, we may have extended the guest/host mask here since we
2198 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2199 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2200 * originally supplied. We must copy those bits from the nested-guest CR0 into
2201 * the nested-guest CR0 read-shadow.
2202 */
2203 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2204 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2205 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2206 Assert(!RT_HI_U32(u64GuestCr0));
2207 Assert(u64GuestCr0 & X86_CR0_NE);
2208
2209 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2210 u64GuestCr0 |= fSetCr0;
2211 u64GuestCr0 &= fZapCr0;
2212 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2213
2214 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2215 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2216 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2217
2218 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2219 }
2220
2221 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2222 }
2223
2224 return VINF_SUCCESS;
2225}
2226
2227
2228/**
2229 * Exports the guest control registers (CR3, CR4) into the guest-state area
2230 * in the VMCS.
2231 *
2232 * @returns VBox strict status code.
2233 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2234 * without unrestricted guest access and the VMMDev is not presently
2235 * mapped (e.g. EFI32).
2236 *
2237 * @param pVCpu The cross context virtual CPU structure.
2238 * @param pVmxTransient The VMX-transient structure.
2239 *
2240 * @remarks No-long-jump zone!!!
2241 */
2242static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2243{
2244 int rc = VINF_SUCCESS;
2245 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2246
2247 /*
2248 * Guest CR2.
2249 * It's always loaded in the assembler code. Nothing to do here.
2250 */
2251
2252 /*
2253 * Guest CR3.
2254 */
2255 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2256 {
2257 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2258
2259 if (VM_IS_VMX_NESTED_PAGING(pVM))
2260 {
2261#ifndef IN_NEM_DARWIN
2262 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2263 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2264
2265 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2266 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2267 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2268 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2269
2270 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2271 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2272 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
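            /*
             * Example: an EPT PML4 table at host-physical 0x12340000 yields EPTP 0x1234001e,
             * i.e. memory type WB (6) in bits 2:0 and the page-walk length of 4 encoded as 3
             * in bits 5:3, with the accessed/dirty enable bit (bit 6) left clear.
             */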
2273
2274 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2275 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2276 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2277 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2278 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2279 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2280 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2281
2282 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2283 AssertRC(rc);
2284#endif
2285
2286 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2287 uint64_t u64GuestCr3 = pCtx->cr3;
2288 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2289 || CPUMIsGuestPagingEnabledEx(pCtx))
2290 {
2291 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2292 if (CPUMIsGuestInPAEModeEx(pCtx))
2293 {
2294 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2295 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2296 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2297 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2298 }
2299
2300 /*
2301 * The guest's view of its CR3 is unblemished with nested paging when the
2302 * guest is using paging or we have unrestricted guest execution to handle
2303 * the guest when it's not using paging.
2304 */
2305 }
2306#ifndef IN_NEM_DARWIN
2307 else
2308 {
2309 /*
2310 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2311 * thinks it accesses physical memory directly, we use our identity-mapped
2312 * page table to map guest-linear to guest-physical addresses. EPT takes care
2313 * of translating it to host-physical addresses.
2314 */
2315 RTGCPHYS GCPhys;
2316 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2317
2318 /* We obtain it here every time as the guest could have relocated this PCI region. */
2319 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2320 if (RT_SUCCESS(rc))
2321 { /* likely */ }
2322 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2323 {
2324 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2325 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2326 }
2327 else
2328 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2329
2330 u64GuestCr3 = GCPhys;
2331 }
2332#endif
2333
2334 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2335 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2336 AssertRC(rc);
2337 }
2338 else
2339 {
2340 Assert(!pVmxTransient->fIsNestedGuest);
2341 /* Non-nested paging case, just use the hypervisor's CR3. */
2342 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2343
2344 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2345 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2346 AssertRC(rc);
2347 }
2348
2349 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2350 }
2351
2352 /*
2353 * Guest CR4.
2354 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2355 */
2356 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2357 {
2358 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2359 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2360
2361 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2362 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2363
2364 /*
2365 * With nested-guests, we may have extended the guest/host mask here (since we
2366 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2367 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2368 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2369 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2370 */
2371 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2372 uint64_t u64GuestCr4 = pCtx->cr4;
2373 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2374 ? pCtx->cr4
2375 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2376 Assert(!RT_HI_U32(u64GuestCr4));
2377
2378#ifndef IN_NEM_DARWIN
2379 /*
2380 * Setup VT-x's view of the guest CR4.
2381 *
2382 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2383 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2384 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2385 *
2386 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2387 */
2388 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2389 {
2390 Assert(pVM->hm.s.vmx.pRealModeTSS);
2391 Assert(PDMVmmDevHeapIsEnabled(pVM));
2392 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2393 }
2394#endif
2395
2396 if (VM_IS_VMX_NESTED_PAGING(pVM))
2397 {
2398 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2399 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2400 {
2401 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2402 u64GuestCr4 |= X86_CR4_PSE;
2403 /* Our identity mapping is a 32-bit page directory. */
2404 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2405 }
2406 /* else use guest CR4.*/
2407 }
2408 else
2409 {
2410 Assert(!pVmxTransient->fIsNestedGuest);
2411
2412 /*
2413 * The shadow paging mode may differ from the guest paging mode; the shadow follows the host
2414 * paging mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
2415 */
2416 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2417 {
2418 case PGMMODE_REAL: /* Real-mode. */
2419 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2420 case PGMMODE_32_BIT: /* 32-bit paging. */
2421 {
2422 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2423 break;
2424 }
2425
2426 case PGMMODE_PAE: /* PAE paging. */
2427 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2428 {
2429 u64GuestCr4 |= X86_CR4_PAE;
2430 break;
2431 }
2432
2433 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2434 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2435 {
2436#ifdef VBOX_WITH_64_BITS_GUESTS
2437 /* For our assumption in vmxHCShouldSwapEferMsr. */
2438 Assert(u64GuestCr4 & X86_CR4_PAE);
2439 break;
2440#endif
2441 }
2442 default:
2443 AssertFailed();
2444 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2445 }
2446 }
2447
2448 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2449 u64GuestCr4 |= fSetCr4;
2450 u64GuestCr4 &= fZapCr4;
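            /* On typical hardware IA32_VMX_CR4_FIXED0 is 0x2000, i.e. only CR4.VMXE is forced to 1 here. */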
2451
2452 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2453 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2454 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2455
2456#ifndef IN_NEM_DARWIN
2457 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2458 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2459 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2460 {
2461 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2462 hmR0VmxUpdateStartVmFunction(pVCpu);
2463 }
2464#endif
2465
2466 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2467
2468 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2469 }
2470 return rc;
2471}
2472
2473
2474#ifdef VBOX_STRICT
2475/**
2476 * Strict function to validate segment registers.
2477 *
2478 * @param pVCpu The cross context virtual CPU structure.
2479 * @param pVmcsInfo The VMCS info. object.
2480 *
2481 * @remarks Will import guest CR0 on strict builds during validation of
2482 * segments.
2483 */
2484static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2485{
2486 /*
2487 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2488 *
2489 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2490 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2491 * unusable bit and doesn't change the guest-context value.
2492 */
2493 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2494 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2495 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2496 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2497 && ( !CPUMIsGuestInRealModeEx(pCtx)
2498 && !CPUMIsGuestInV86ModeEx(pCtx)))
2499 {
2500 /* Protected mode checks */
2501 /* CS */
2502 Assert(pCtx->cs.Attr.n.u1Present);
2503 Assert(!(pCtx->cs.Attr.u & 0xf00));
2504 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2505 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2506 || !(pCtx->cs.Attr.n.u1Granularity));
2507 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2508 || (pCtx->cs.Attr.n.u1Granularity));
2509 /* CS cannot be loaded with NULL in protected mode. */
2510 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2511 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2512 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2513 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2514 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2515 else
2516 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2517 /* SS */
2518 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2519 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2520 if ( !(pCtx->cr0 & X86_CR0_PE)
2521 || pCtx->cs.Attr.n.u4Type == 3)
2522 {
2523 Assert(!pCtx->ss.Attr.n.u2Dpl);
2524 }
2525 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2526 {
2527 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2528 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2529 Assert(pCtx->ss.Attr.n.u1Present);
2530 Assert(!(pCtx->ss.Attr.u & 0xf00));
2531 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2532 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2533 || !(pCtx->ss.Attr.n.u1Granularity));
2534 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2535 || (pCtx->ss.Attr.n.u1Granularity));
2536 }
2537 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2538 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2539 {
2540 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2541 Assert(pCtx->ds.Attr.n.u1Present);
2542 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2543 Assert(!(pCtx->ds.Attr.u & 0xf00));
2544 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2545 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2546 || !(pCtx->ds.Attr.n.u1Granularity));
2547 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2548 || (pCtx->ds.Attr.n.u1Granularity));
2549 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2550 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2551 }
2552 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2553 {
2554 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2555 Assert(pCtx->es.Attr.n.u1Present);
2556 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2557 Assert(!(pCtx->es.Attr.u & 0xf00));
2558 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2559 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2560 || !(pCtx->es.Attr.n.u1Granularity));
2561 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2562 || (pCtx->es.Attr.n.u1Granularity));
2563 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2564 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2565 }
2566 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2567 {
2568 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2569 Assert(pCtx->fs.Attr.n.u1Present);
2570 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2571 Assert(!(pCtx->fs.Attr.u & 0xf00));
2572 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2573 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2574 || !(pCtx->fs.Attr.n.u1Granularity));
2575 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2576 || (pCtx->fs.Attr.n.u1Granularity));
2577 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2578 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2579 }
2580 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2581 {
2582 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2583 Assert(pCtx->gs.Attr.n.u1Present);
2584 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2585 Assert(!(pCtx->gs.Attr.u & 0xf00));
2586 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2587 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2588 || !(pCtx->gs.Attr.n.u1Granularity));
2589 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2590 || (pCtx->gs.Attr.n.u1Granularity));
2591 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2592 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2593 }
2594 /* 64-bit capable CPUs. */
2595 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2596 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2597 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2598 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2599 }
2600 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2601 || ( CPUMIsGuestInRealModeEx(pCtx)
2602 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2603 {
2604 /* Real and v86 mode checks. */
2605 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
2606 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2607#ifndef IN_NEM_DARWIN
2608 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2609 {
2610 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2611 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2612 }
2613 else
2614#endif
2615 {
2616 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2617 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2618 }
2619
2620 /* CS */
2621 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2622 Assert(pCtx->cs.u32Limit == 0xffff);
2623 Assert(u32CSAttr == 0xf3);
2624 /* SS */
2625 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2626 Assert(pCtx->ss.u32Limit == 0xffff);
2627 Assert(u32SSAttr == 0xf3);
2628 /* DS */
2629 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2630 Assert(pCtx->ds.u32Limit == 0xffff);
2631 Assert(u32DSAttr == 0xf3);
2632 /* ES */
2633 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2634 Assert(pCtx->es.u32Limit == 0xffff);
2635 Assert(u32ESAttr == 0xf3);
2636 /* FS */
2637 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2638 Assert(pCtx->fs.u32Limit == 0xffff);
2639 Assert(u32FSAttr == 0xf3);
2640 /* GS */
2641 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2642 Assert(pCtx->gs.u32Limit == 0xffff);
2643 Assert(u32GSAttr == 0xf3);
2644 /* 64-bit capable CPUs. */
2645 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2646 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2647 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2648 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2649 }
2650}
2651#endif /* VBOX_STRICT */
2652
2653
2654/**
2655 * Exports a guest segment register into the guest-state area in the VMCS.
2656 *
2657 * @returns VBox status code.
2658 * @param pVCpu The cross context virtual CPU structure.
2659 * @param pVmcsInfo The VMCS info. object.
2660 * @param iSegReg The segment register number (X86_SREG_XXX).
2661 * @param pSelReg Pointer to the segment selector.
2662 *
2663 * @remarks No-long-jump zone!!!
2664 */
2665static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2666{
2667 Assert(iSegReg < X86_SREG_COUNT);
2668
2669 uint32_t u32Access = pSelReg->Attr.u;
2670#ifndef IN_NEM_DARWIN
2671 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2672#endif
2673 {
2674 /*
2675 * The way to tell whether this is really a null selector or just a selector that was
2676 * loaded with the value 0 in real-mode is by looking at the segment attributes. A
2677 * selector loaded in real-mode with the value 0 is valid and usable in protected-mode,
2678 * so we should -not- mark it as an unusable segment. Both the recompiler & VT-x
2679 * ensure that NULL selectors loaded in protected-mode have their attributes
2680 * set to 0.
2681 if (u32Access)
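        /*
         * For example, a segment loaded with selector 0 while in real mode typically still has
         * non-zero attributes (present, accessed, etc.) and stays usable, whereas a genuine
         * protected-mode NULL selector has an attribute value of 0 and is marked unusable below.
         */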
2682 { }
2683 else
2684 u32Access = X86DESCATTR_UNUSABLE;
2685 }
2686#ifndef IN_NEM_DARWIN
2687 else
2688 {
2689 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2690 u32Access = 0xf3;
2691 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2692 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2693 RT_NOREF_PV(pVCpu);
2694 }
2695#else
2696 RT_NOREF(pVmcsInfo);
2697#endif
2698
2699 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2700 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2701 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2702
2703 /*
2704 * Commit it to the VMCS.
2705 */
2706 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2707 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2708 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2709 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2710 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2711 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2712 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2713 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2714 return VINF_SUCCESS;
2715}
2716
2717
2718/**
2719 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2720 * area in the VMCS.
2721 *
2722 * @returns VBox status code.
2723 * @param pVCpu The cross context virtual CPU structure.
2724 * @param pVmxTransient The VMX-transient structure.
2725 *
2726 * @remarks Will import guest CR0 on strict builds during validation of
2727 * segments.
2728 * @remarks No-long-jump zone!!!
2729 */
2730static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2731{
2732 int rc = VERR_INTERNAL_ERROR_5;
2733#ifndef IN_NEM_DARWIN
2734 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2735#endif
2736 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2737 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2738#ifndef IN_NEM_DARWIN
2739 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2740#endif
2741
2742 /*
2743 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2744 */
2745 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2746 {
2747 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2748 {
2749 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2750#ifndef IN_NEM_DARWIN
2751 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2752 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2753#endif
2754 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2755 AssertRC(rc);
2756 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2757 }
2758
2759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2760 {
2761 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2762#ifndef IN_NEM_DARWIN
2763 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2764 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2765#endif
2766 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2767 AssertRC(rc);
2768 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2769 }
2770
2771 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2772 {
2773 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2774#ifndef IN_NEM_DARWIN
2775 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2776 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2777#endif
2778 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2779 AssertRC(rc);
2780 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2781 }
2782
2783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2784 {
2785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2786#ifndef IN_NEM_DARWIN
2787 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2788 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2789#endif
2790 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2791 AssertRC(rc);
2792 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2793 }
2794
2795 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2796 {
2797 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2798#ifndef IN_NEM_DARWIN
2799 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2800 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2801#endif
2802 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2803 AssertRC(rc);
2804 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2805 }
2806
2807 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2808 {
2809 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2810#ifndef IN_NEM_DARWIN
2811 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2812 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2813#endif
2814 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2815 AssertRC(rc);
2816 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2817 }
2818
2819#ifdef VBOX_STRICT
2820 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2821#endif
2822 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2823 pCtx->cs.Attr.u));
2824 }
2825
2826 /*
2827 * Guest TR.
2828 */
2829 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2830 {
2831 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2832
2833 /*
2834 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2835 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2836 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2837 */
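        /* When the real-on-v86 hack is active below, a synthetic 32-bit busy TSS (selector 0,
           limit HM_VTX_TSS_SIZE, base = guest-physical address of pRealModeTSS) is handed to
           VT-x instead of the guest's own TR. */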
2838 uint16_t u16Sel;
2839 uint32_t u32Limit;
2840 uint64_t u64Base;
2841 uint32_t u32AccessRights;
2842#ifndef IN_NEM_DARWIN
2843 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2844#endif
2845 {
2846 u16Sel = pCtx->tr.Sel;
2847 u32Limit = pCtx->tr.u32Limit;
2848 u64Base = pCtx->tr.u64Base;
2849 u32AccessRights = pCtx->tr.Attr.u;
2850 }
2851#ifndef IN_NEM_DARWIN
2852 else
2853 {
2854 Assert(!pVmxTransient->fIsNestedGuest);
2855 Assert(pVM->hm.s.vmx.pRealModeTSS);
2856 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2857
2858 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2859 RTGCPHYS GCPhys;
2860 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2861 AssertRCReturn(rc, rc);
2862
2863 X86DESCATTR DescAttr;
2864 DescAttr.u = 0;
2865 DescAttr.n.u1Present = 1;
2866 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2867
2868 u16Sel = 0;
2869 u32Limit = HM_VTX_TSS_SIZE;
2870 u64Base = GCPhys;
2871 u32AccessRights = DescAttr.u;
2872 }
2873#endif
2874
2875 /* Validate. */
2876 Assert(!(u16Sel & RT_BIT(2)));
2877 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2878 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2879 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2880 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2881 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2882 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2883 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2884 Assert( (u32Limit & 0xfff) == 0xfff
2885 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2886 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2887 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2888
2889 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2890 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2891 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2892 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2893
2894 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2895 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2896 }
2897
2898 /*
2899 * Guest GDTR.
2900 */
2901 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2902 {
2903 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2904
2905 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2906 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2907
2908 /* Validate. */
2909 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2910
2911 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2912 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2913 }
2914
2915 /*
2916 * Guest LDTR.
2917 */
2918 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2919 {
2920 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2921
2922 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2923 uint32_t u32Access;
2924 if ( !pVmxTransient->fIsNestedGuest
2925 && !pCtx->ldtr.Attr.u)
2926 u32Access = X86DESCATTR_UNUSABLE;
2927 else
2928 u32Access = pCtx->ldtr.Attr.u;
2929
2930 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2931 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2932 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2933 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2934
2935 /* Validate. */
2936 if (!(u32Access & X86DESCATTR_UNUSABLE))
2937 {
2938 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2939 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2940 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2941 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2942 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2943 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2944 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2945 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2946 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2947 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2948 }
2949
2950 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2951 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2952 }
2953
2954 /*
2955 * Guest IDTR.
2956 */
2957 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2958 {
2959 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2960
2961 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2962 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2963
2964 /* Validate. */
2965 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2966
2967 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2968 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2969 }
2970
2971 return VINF_SUCCESS;
2972}
2973
2974
2975/**
2976 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2977 * VM-exit interruption info type.
2978 *
2979 * @returns The IEM exception flags.
2980 * @param uVector The event vector.
2981 * @param uVmxEventType The VMX event type.
2982 *
2983 * @remarks This function currently only constructs flags required for
2984 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2985 * and CR2 aspects of an exception are not included).
2986 */
2987static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2988{
2989 uint32_t fIemXcptFlags;
2990 switch (uVmxEventType)
2991 {
2992 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2993 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2994 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2995 break;
2996
2997 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2998 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2999 break;
3000
3001 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3002 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3003 break;
3004
3005 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3006 {
3007 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3008 if (uVector == X86_XCPT_BP)
3009 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3010 else if (uVector == X86_XCPT_OF)
3011 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3012 else
3013 {
3014 fIemXcptFlags = 0;
3015 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3016 }
3017 break;
3018 }
3019
3020 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3021 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3022 break;
3023
3024 default:
3025 fIemXcptFlags = 0;
3026 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3027 break;
3028 }
3029 return fIemXcptFlags;
3030}
3031
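/*
 * A couple of concrete mappings produced by vmxHCGetIemXcptFlags() above, as they
 * follow from the switch; listed here purely for quick reference when reading the
 * code that later consumes these flags.
 *
 * @code
 *     vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
 *         // -> IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR
 *
 *     vmxHCGetIemXcptFlags(X86_XCPT_PF, VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
 *         // -> IEM_XCPT_FLAGS_T_CPU_XCPT
 * @endcode
 */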
3032
3033/**
3034 * Sets an event as a pending event to be injected into the guest.
3035 *
3036 * @param pVCpu The cross context virtual CPU structure.
3037 * @param u32IntInfo The VM-entry interruption-information field.
3038 * @param cbInstr The VM-entry instruction length in bytes (for
3039 * software interrupts, exceptions and privileged
3040 * software exceptions).
3041 * @param u32ErrCode The VM-entry exception error code.
3042 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3043 * page-fault.
3044 */
3045DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3046 RTGCUINTPTR GCPtrFaultAddress)
3047{
3048 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3049 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3050 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3051 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3052 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3053 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3054}
3055
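/*
 * Usage sketch: queueing a page-fault follows the same pattern as the dedicated
 * helpers below, except that both an error code and the fault address (CR2) are
 * supplied.  The names uErrCode and GCPtrFault are placeholders for illustration
 * only; everything else is taken from this file.
 *
 * @code
 *     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0, uErrCode, GCPtrFault);   // cbInstr=0 for hardware exceptions
 * @endcode
 */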
3056
3057/**
3058 * Sets an external interrupt as pending-for-injection into the VM.
3059 *
3060 * @param pVCpu The cross context virtual CPU structure.
3061 * @param u8Interrupt The external interrupt vector.
3062 */
3063DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3064{
3065 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3066 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3067 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3069 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3070}
3071
3072
3073/**
3074 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3075 *
3076 * @param pVCpu The cross context virtual CPU structure.
3077 */
3078DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3079{
3080 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3081 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3082 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3084 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3085}
3086
3087
3088/**
3089 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3090 *
3091 * @param pVCpu The cross context virtual CPU structure.
3092 */
3093DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3094{
3095 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3096 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3097 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3099 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3100}
3101
3102
3103/**
3104 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3105 *
3106 * @param pVCpu The cross context virtual CPU structure.
3107 */
3108DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3109{
3110 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3112 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3113 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3114 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3115}
3116
3117
3118/**
3119 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3120 *
3121 * @param pVCpu The cross context virtual CPU structure.
3122 */
3123DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3124{
3125 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3129 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3130}
3131
3132
3133#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3134/**
3135 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3136 *
3137 * @param pVCpu The cross context virtual CPU structure.
3138 * @param u32ErrCode The error code for the general-protection exception.
3139 */
3140DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3141{
3142 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3143 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3144 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3145 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3146 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3147}
3148
3149
3150/**
3151 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3152 *
3153 * @param pVCpu The cross context virtual CPU structure.
3154 * @param u32ErrCode The error code for the stack exception.
3155 */
3156DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3157{
3158 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3159 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3160 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3161 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3162 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3163}
3164#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3165
3166
3167/**
3168 * Fixes up attributes for the specified segment register.
3169 *
3170 * @param pVCpu The cross context virtual CPU structure.
3171 * @param pSelReg The segment register that needs fixing.
3172 * @param pszRegName The register name (for logging and assertions).
3173 */
3174static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3175{
3176 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3177
3178 /*
3179 * If VT-x marks the segment as unusable, most other bits remain undefined:
3180 * - For CS the L, D and G bits have meaning.
3181 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3182 * - For the remaining data segments no bits are defined.
3183 *
3184     * The present bit and the unusable bit have been observed to be set at the
3185 * same time (the selector was supposed to be invalid as we started executing
3186 * a V8086 interrupt in ring-0).
3187 *
3188 * What should be important for the rest of the VBox code, is that the P bit is
3189 * cleared. Some of the other VBox code recognizes the unusable bit, but
3190     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3191 * safe side here, we'll strip off P and other bits we don't care about. If
3192 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3193 *
3194 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3195 */
3196#ifdef VBOX_STRICT
3197 uint32_t const uAttr = pSelReg->Attr.u;
3198#endif
3199
3200 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3201 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3202 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3203
3204#ifdef VBOX_STRICT
3205# ifndef IN_NEM_DARWIN
3206 VMMRZCallRing3Disable(pVCpu);
3207# endif
3208 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3209# ifdef DEBUG_bird
3210 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3211 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3212 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3213# endif
3214# ifndef IN_NEM_DARWIN
3215 VMMRZCallRing3Enable(pVCpu);
3216# endif
3217 NOREF(uAttr);
3218#endif
3219 RT_NOREF2(pVCpu, pszRegName);
3220}
3221
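/*
 * Worked example for the masking above: an unusable-but-present read/write
 * accessed data segment, i.e. Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_P
 * | X86DESCATTR_DT | 3 (type), comes out of the fix-up as
 * X86DESCATTR_UNUSABLE | X86DESCATTR_DT | 3: the P, limit-high and AVL bits
 * are stripped while everything listed in the keep-mask is preserved.
 */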
3222
3223/**
3224 * Imports a guest segment register from the current VMCS into the guest-CPU
3225 * context.
3226 *
3227 * @param pVCpu The cross context virtual CPU structure.
3228 * @param iSegReg The segment register number (X86_SREG_XXX).
3229 *
3230 * @remarks Called with interrupts and/or preemption disabled.
3231 */
3232static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3233{
3234 Assert(iSegReg < X86_SREG_COUNT);
3235 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3236 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3237 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3238 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3239
3240 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3241
3242 uint16_t u16Sel;
3243 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3244 pSelReg->Sel = u16Sel;
3245 pSelReg->ValidSel = u16Sel;
3246
3247 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3248 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3249
3250 uint32_t u32Attr;
3251 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3252 pSelReg->Attr.u = u32Attr;
3253 if (u32Attr & X86DESCATTR_UNUSABLE)
3254 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3255
3256 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3257}
3258
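/*
 * Note on the register-name lookup in vmxHCImportGuestSegReg() above: the
 * expression "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3 indexes into a packed
 * string of 3-byte entries ("ES" + NUL, "CS" + NUL, ...).  For example, with
 * iSegReg = X86_SREG_SS (2) the offset is 6 and the resulting pointer is the
 * string "SS".  The entry order matches the X86_SREG_XXX numbering.
 */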
3259
3260/**
3261 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure.
3264 *
3265 * @remarks Called with interrupts and/or preemption disabled.
3266 */
3267static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3268{
3269 uint16_t u16Sel;
3270 uint64_t u64Base;
3271 uint32_t u32Limit, u32Attr;
3272 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3273 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3274 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3275 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3276
3277 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3278 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3279 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3280 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3281 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3282 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3283 if (u32Attr & X86DESCATTR_UNUSABLE)
3284 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3285}
3286
3287
3288/**
3289 * Imports the guest TR from the current VMCS into the guest-CPU context.
3290 *
3291 * @param pVCpu The cross context virtual CPU structure.
3292 *
3293 * @remarks Called with interrupts and/or preemption disabled.
3294 */
3295static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3296{
3297 uint16_t u16Sel;
3298 uint64_t u64Base;
3299 uint32_t u32Limit, u32Attr;
3300 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3301 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3302 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3303 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3304
3305 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3306 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3307 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3308 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3309 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3310 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3311 /* TR is the only selector that can never be unusable. */
3312 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3313}
3314
3315
3316/**
3317 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3318 *
3319 * @param pVCpu The cross context virtual CPU structure.
3320 *
3321 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3322 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3323 * instead!!!
3324 */
3325static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3326{
3327 uint64_t u64Val;
3328 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3329 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3332 AssertRC(rc);
3333
3334 pCtx->rip = u64Val;
3335 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3336 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3337 }
3338}
3339
3340
3341/**
3342 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3343 *
3344 * @param pVCpu The cross context virtual CPU structure.
3345 * @param pVmcsInfo The VMCS info. object.
3346 *
3347 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3348 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3349 * instead!!!
3350 */
3351static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3352{
3353 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3354 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3355 {
3356 uint64_t u64Val;
3357 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3358 AssertRC(rc);
3359
3360 pCtx->rflags.u64 = u64Val;
3361#ifndef IN_NEM_DARWIN
3362 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3363 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3364 {
3365 pCtx->eflags.Bits.u1VM = 0;
3366 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3367 }
3368#else
3369 RT_NOREF(pVmcsInfo);
3370#endif
3371 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3372 }
3373}
3374
3375
3376/**
3377 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3378 * context.
3379 *
3380 * @param pVCpu The cross context virtual CPU structure.
3381 * @param pVmcsInfo The VMCS info. object.
3382 *
3383 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3384 * do not log!
3385 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3386 * instead!!!
3387 */
3388static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3389{
3390 uint32_t u32Val;
3391 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3392 if (!u32Val)
3393 {
3394 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3395 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3396 CPUMSetGuestNmiBlocking(pVCpu, false);
3397 }
3398 else
3399 {
3400 /*
3401 * We must import RIP here to set our EM interrupt-inhibited state.
3402 * We also import RFLAGS as our code that evaluates pending interrupts
3403 * before VM-entry requires it.
3404 */
3405 vmxHCImportGuestRip(pVCpu);
3406 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3407
3408 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3409 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3410 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3411 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3412
3413 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3414 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3415 }
3416}
3417
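/*
 * Quick reference for the interruptibility-state handling above (per the Intel
 * spec's guest interruptibility-state definition): bit 0 = blocking by STI,
 * bit 1 = blocking by MOV SS, bit 3 = blocking by NMI.  Either of the first two
 * arms EMSetInhibitInterruptsPC() for the current RIP, while bit 3 is reflected
 * via CPUMSetGuestNmiBlocking().
 */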
3418
3419/**
3420 * Worker for VMXR0ImportStateOnDemand.
3421 *
3422 * @returns VBox status code.
3423 * @param pVCpu The cross context virtual CPU structure.
3424 * @param pVmcsInfo The VMCS info. object.
3425 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3426 */
3427static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3428{
3429 int rc = VINF_SUCCESS;
3430 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3431 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3432 uint32_t u32Val;
3433
3434 /*
3435     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3436 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3437 * neither are other host platforms.
3438 *
3439 * Committing this temporarily as it prevents BSOD.
3440 *
3441 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3442 */
3443# ifdef RT_OS_WINDOWS
3444 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3445 return VERR_HM_IPE_1;
3446# endif
3447
3448 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3449
3450#ifndef IN_NEM_DARWIN
3451 /*
3452 * We disable interrupts to make the updating of the state and in particular
3453     * the fExtrn modification atomic wrt preemption hooks.
3454 */
3455 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3456#endif
3457
3458 fWhat &= pCtx->fExtrn;
3459 if (fWhat)
3460 {
3461 do
3462 {
3463 if (fWhat & CPUMCTX_EXTRN_RIP)
3464 vmxHCImportGuestRip(pVCpu);
3465
3466 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3467 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3468
3469 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3470 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3471
3472 if (fWhat & CPUMCTX_EXTRN_RSP)
3473 {
3474 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3475 AssertRC(rc);
3476 }
3477
3478 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3479 {
3480 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3481#ifndef IN_NEM_DARWIN
3482 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3483#else
3484 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3485#endif
3486 if (fWhat & CPUMCTX_EXTRN_CS)
3487 {
3488 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3489 vmxHCImportGuestRip(pVCpu);
3490 if (fRealOnV86Active)
3491 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3492 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3493 }
3494 if (fWhat & CPUMCTX_EXTRN_SS)
3495 {
3496 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3497 if (fRealOnV86Active)
3498 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3499 }
3500 if (fWhat & CPUMCTX_EXTRN_DS)
3501 {
3502 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3503 if (fRealOnV86Active)
3504 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3505 }
3506 if (fWhat & CPUMCTX_EXTRN_ES)
3507 {
3508 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3509 if (fRealOnV86Active)
3510 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3511 }
3512 if (fWhat & CPUMCTX_EXTRN_FS)
3513 {
3514 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3515 if (fRealOnV86Active)
3516 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3517 }
3518 if (fWhat & CPUMCTX_EXTRN_GS)
3519 {
3520 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3521 if (fRealOnV86Active)
3522 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3523 }
3524 }
3525
3526 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3527 {
3528 if (fWhat & CPUMCTX_EXTRN_LDTR)
3529 vmxHCImportGuestLdtr(pVCpu);
3530
3531 if (fWhat & CPUMCTX_EXTRN_GDTR)
3532 {
3533 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3534 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3535 pCtx->gdtr.cbGdt = u32Val;
3536 }
3537
3538 /* Guest IDTR. */
3539 if (fWhat & CPUMCTX_EXTRN_IDTR)
3540 {
3541 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3542 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3543 pCtx->idtr.cbIdt = u32Val;
3544 }
3545
3546 /* Guest TR. */
3547 if (fWhat & CPUMCTX_EXTRN_TR)
3548 {
3549#ifndef IN_NEM_DARWIN
3550 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3551 don't need to import that one. */
3552 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3553#endif
3554 vmxHCImportGuestTr(pVCpu);
3555 }
3556 }
3557
3558 if (fWhat & CPUMCTX_EXTRN_DR7)
3559 {
3560#ifndef IN_NEM_DARWIN
3561 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3562#endif
3563 {
3564 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3565 AssertRC(rc);
3566 }
3567 }
3568
3569 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3570 {
3571 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3572 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3573 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3574 pCtx->SysEnter.cs = u32Val;
3575 }
3576
3577#ifndef IN_NEM_DARWIN
3578 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3579 {
3580 if ( pVM->hmr0.s.fAllow64BitGuests
3581 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3582 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3583 }
3584
3585 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3586 {
3587 if ( pVM->hmr0.s.fAllow64BitGuests
3588 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3589 {
3590 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3591 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3592 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3593 }
3594 }
3595
3596 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3597 {
3598 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3599 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3600 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3601 Assert(pMsrs);
3602 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3603 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3604 for (uint32_t i = 0; i < cMsrs; i++)
3605 {
3606 uint32_t const idMsr = pMsrs[i].u32Msr;
3607 switch (idMsr)
3608 {
3609 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3610 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3611 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3612 default:
3613 {
3614 uint32_t idxLbrMsr;
3615 if (VM_IS_VMX_LBR(pVM))
3616 {
3617 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3618 {
3619 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3620 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3621 break;
3622 }
3623 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3624 {
3625                                 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3626 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3627 break;
3628 }
3629 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3630 {
3631 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3632 break;
3633 }
3634 /* Fallthru (no break) */
3635 }
3636 pCtx->fExtrn = 0;
3637 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3638 ASMSetFlags(fEFlags);
3639 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3640 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3641 }
3642 }
3643 }
3644 }
3645#endif
3646
3647 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3648 {
3649 if (fWhat & CPUMCTX_EXTRN_CR0)
3650 {
3651 uint64_t u64Cr0;
3652 uint64_t u64Shadow;
3653 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3654 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3655#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3656 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3657 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3658#else
3659 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3660 {
3661 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3662 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3663 }
3664 else
3665 {
3666 /*
3667 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3668 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3669 * re-construct CR0. See @bugref{9180#c95} for details.
3670 */
3671 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3672 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3673 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3674 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3675 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3676 }
3677#endif
3678#ifndef IN_NEM_DARWIN
3679 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3680#endif
3681 CPUMSetGuestCR0(pVCpu, u64Cr0);
3682#ifndef IN_NEM_DARWIN
3683 VMMRZCallRing3Enable(pVCpu);
3684#endif
3685 }
3686
3687 if (fWhat & CPUMCTX_EXTRN_CR4)
3688 {
3689 uint64_t u64Cr4;
3690 uint64_t u64Shadow;
3691 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3692 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3693#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3694 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3695 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3696#else
3697 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3698 {
3699 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3700 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3701 }
3702 else
3703 {
3704 /*
3705 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3706 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3707 * re-construct CR4. See @bugref{9180#c95} for details.
3708 */
3709 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3710 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3711 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3712 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3713 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3714 }
3715#endif
3716 pCtx->cr4 = u64Cr4;
3717 }
3718
3719 if (fWhat & CPUMCTX_EXTRN_CR3)
3720 {
3721 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3722 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3723 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3724 && CPUMIsGuestPagingEnabledEx(pCtx)))
3725 {
3726 uint64_t u64Cr3;
3727 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3728 if (pCtx->cr3 != u64Cr3)
3729 {
3730 pCtx->cr3 = u64Cr3;
3731 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3732 }
3733
3734 /*
3735 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3736 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3737 */
3738 if (CPUMIsGuestInPAEModeEx(pCtx))
3739 {
3740 X86PDPE aPaePdpes[4];
3741 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3742 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3744 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3745 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3746 {
3747 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3748 /* PGM now updates PAE PDPTEs while updating CR3. */
3749 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3750 }
3751 }
3752 }
3753 }
3754 }
3755
3756#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3757 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3758 {
3759 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3760 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3761 {
3762 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3763 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3764 if (RT_SUCCESS(rc))
3765 { /* likely */ }
3766 else
3767 break;
3768 }
3769 }
3770#endif
3771 } while (0);
3772
3773 if (RT_SUCCESS(rc))
3774 {
3775 /* Update fExtrn. */
3776 pCtx->fExtrn &= ~fWhat;
3777
3778 /* If everything has been imported, clear the HM keeper bit. */
3779 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3780 {
3781#ifndef IN_NEM_DARWIN
3782 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3783#else
3784 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3785#endif
3786 Assert(!pCtx->fExtrn);
3787 }
3788 }
3789 }
3790#ifndef IN_NEM_DARWIN
3791 else
3792 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3793
3794 /*
3795 * Restore interrupts.
3796 */
3797 ASMSetFlags(fEFlags);
3798#endif
3799
3800 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3801
3802 if (RT_SUCCESS(rc))
3803 { /* likely */ }
3804 else
3805 return rc;
3806
3807 /*
3808 * Honor any pending CR3 updates.
3809 *
3810 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3811 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3812 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3813 *
3814     * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
3815 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3816 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3817 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3818 *
3819 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3820 *
3821 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3822 */
3823 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3824#ifndef IN_NEM_DARWIN
3825 && VMMRZCallRing3IsEnabled(pVCpu)
3826#endif
3827 )
3828 {
3829 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3830 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3831 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3832 }
3833
3834 return VINF_SUCCESS;
3835}
3836
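/*
 * Typical usage sketch for the importer above: a VM-exit handler that only
 * needs a subset of the guest state pulls it in on demand before touching the
 * corresponding CPUMCTX fields, e.g.:
 *
 * @code
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     AssertRCReturn(rc, rc);
 *     // pVCpu->cpum.GstCtx.rip and .rflags are now valid; fExtrn has been updated.
 * @endcode
 *
 * Fields already present in the context (not set in fExtrn) are skipped by the
 * "fWhat &= pCtx->fExtrn" filtering, so over-asking is cheap.
 */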
3837
3838/**
3839 * Check per-VM and per-VCPU force flag actions that require us to go back to
3840 * ring-3 for one reason or another.
3841 *
3842 * @returns Strict VBox status code (i.e. informational status codes too)
3843 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3844 * ring-3.
3845 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3846 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3847 * interrupts)
3848 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3849 * all EMTs to be in ring-3.
3850 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
3851 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3852 * to the EM loop.
3853 *
3854 * @param pVCpu The cross context virtual CPU structure.
3855 * @param   fIsNestedGuest  Flag whether this is for a pending nested guest event.
3856 * @param fStepping Whether we are single-stepping the guest using the
3857 * hypervisor debugger.
3858 *
3859 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
3860 * is no longer in VMX non-root mode.
3861 */
3862static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3863{
3864#ifndef IN_NEM_DARWIN
3865 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3866#endif
3867
3868 /*
3869 * Update pending interrupts into the APIC's IRR.
3870 */
3871 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3872 APICUpdatePendingInterrupts(pVCpu);
3873
3874 /*
3875 * Anything pending? Should be more likely than not if we're doing a good job.
3876 */
3877 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3878 if ( !fStepping
3879 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3880 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3881 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3882 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3883 return VINF_SUCCESS;
3884
3885    /* Pending PGM CR3 sync. */
3886    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3887 {
3888 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3889 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3890 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3891 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3892 if (rcStrict != VINF_SUCCESS)
3893 {
3894 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3895 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3896 return rcStrict;
3897 }
3898 }
3899
3900 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3901 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3902 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3903 {
3904 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3905 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3906 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3907 return rc;
3908 }
3909
3910 /* Pending VM request packets, such as hardware interrupts. */
3911 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3912 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3913 {
3914 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3915 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3916 return VINF_EM_PENDING_REQUEST;
3917 }
3918
3919 /* Pending PGM pool flushes. */
3920 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3921 {
3922 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3923 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3924 return VINF_PGM_POOL_FLUSH_PENDING;
3925 }
3926
3927 /* Pending DMA requests. */
3928 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3929 {
3930 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3931 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3932 return VINF_EM_RAW_TO_R3;
3933 }
3934
3935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3936 /*
3937 * Pending nested-guest events.
3938 *
3939     * Please note that the priority of these events is specified and important.
3940 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3941 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3942 */
3943 if (fIsNestedGuest)
3944 {
3945 /* Pending nested-guest APIC-write. */
3946 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3947 {
3948 Log4Func(("Pending nested-guest APIC-write\n"));
3949 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3950 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3951 return rcStrict;
3952 }
3953
3954 /* Pending nested-guest monitor-trap flag (MTF). */
3955 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3956 {
3957 Log4Func(("Pending nested-guest MTF\n"));
3958 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3959 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3960 return rcStrict;
3961 }
3962
3963 /* Pending nested-guest VMX-preemption timer expired. */
3964 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3965 {
3966 Log4Func(("Pending nested-guest preempt timer\n"));
3967 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3968 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3969 return rcStrict;
3970 }
3971 }
3972#else
3973 NOREF(fIsNestedGuest);
3974#endif
3975
3976 return VINF_SUCCESS;
3977}
3978
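/*
 * Usage sketch for the force-flag check above: the pre-run code (not shown in
 * this excerpt) calls it before each VM-entry and bails out to ring-3 for
 * anything other than VINF_SUCCESS, roughly:
 *
 * @code
 *     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, fIsNestedGuest, fStepping);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;   // VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3, VINF_EM_PENDING_REQUEST, ...
 * @endcode
 *
 * This is only a sketch of the call pattern; the actual run loop lives elsewhere.
 */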
3979
3980/**
3981 * Converts any TRPM trap into a pending HM event. This is typically used when
3982 * entering from ring-3 (not longjmp returns).
3983 *
3984 * @param pVCpu The cross context virtual CPU structure.
3985 */
3986static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3987{
3988 Assert(TRPMHasTrap(pVCpu));
3989 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3990
3991 uint8_t uVector;
3992 TRPMEVENT enmTrpmEvent;
3993 uint32_t uErrCode;
3994 RTGCUINTPTR GCPtrFaultAddress;
3995 uint8_t cbInstr;
3996 bool fIcebp;
3997
3998 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3999 AssertRC(rc);
4000
4001 uint32_t u32IntInfo;
4002 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4003 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4004
4005 rc = TRPMResetTrap(pVCpu);
4006 AssertRC(rc);
4007 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4008 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4009
4010 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4011}
4012
4013
4014/**
4015 * Converts the pending HM event into a TRPM trap.
4016 *
4017 * @param pVCpu The cross context virtual CPU structure.
4018 */
4019static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4020{
4021 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4022
4023 /* If a trap was already pending, we did something wrong! */
4024 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4025
4026 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4027 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4028 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4029
4030 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4031
4032 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4033 AssertRC(rc);
4034
4035 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4036 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4037
4038 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4039 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4040 else
4041 {
4042 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4043 switch (uVectorType)
4044 {
4045 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4046 TRPMSetTrapDueToIcebp(pVCpu);
4047 RT_FALL_THRU();
4048 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4049 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4050 {
4051 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4052 || ( uVector == X86_XCPT_BP /* INT3 */
4053 || uVector == X86_XCPT_OF /* INTO */
4054 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4055 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4056 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4057 break;
4058 }
4059 }
4060 }
4061
4062 /* We're now done converting the pending event. */
4063 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4064}
4065
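/*
 * The two converters above bridge TRPM (used by the rest of VMM and ring-3) and
 * the HM pending-event representation.  The typical round trip, sketched from
 * the remarks and assertions above:
 *
 * @code
 *     // On entry from ring-3, adopt any queued TRPM trap:
 *     if (TRPMHasTrap(pVCpu))
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);
 *
 *     // When leaving for ring-3 with an event still pending, hand it back:
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
 *         vmxHCPendingEventToTrpmTrap(pVCpu);
 * @endcode
 */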
4066
4067/**
4068 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4069 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4070 *
4071 * @param pVCpu The cross context virtual CPU structure.
4072 * @param pVmcsInfo The VMCS info. object.
4073 */
4074static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4075{
4076 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4077 {
4078 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4079 {
4080 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4081 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4082 AssertRC(rc);
4083 }
4084    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4085}
4086
4087
4088/**
4089 * Clears the interrupt-window exiting control in the VMCS.
4090 *
4091 * @param pVCpu The cross context virtual CPU structure.
4092 * @param pVmcsInfo The VMCS info. object.
4093 */
4094DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4095{
4096 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4097 {
4098 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4099 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4100 AssertRC(rc);
4101 }
4102}
4103
4104
4105/**
4106 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4107 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4108 *
4109 * @param pVCpu The cross context virtual CPU structure.
4110 * @param pVmcsInfo The VMCS info. object.
4111 */
4112static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4113{
4114 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4115 {
4116 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4117 {
4118 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4119 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4120 AssertRC(rc);
4121 Log4Func(("Setup NMI-window exiting\n"));
4122 }
4123 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4124}
4125
4126
4127/**
4128 * Clears the NMI-window exiting control in the VMCS.
4129 *
4130 * @param pVCpu The cross context virtual CPU structure.
4131 * @param pVmcsInfo The VMCS info. object.
4132 */
4133DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4134{
4135 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4136 {
4137 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4138 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4139 AssertRC(rc);
4140 }
4141}
4142
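/*
 * The window-exiting helpers above implement the usual VT-x pattern for a
 * pending-but-blocked event: if an interrupt/NMI cannot be injected right now
 * (interrupts disabled, blocking by STI/MOV SS, NMI blocking), request a
 * VM-exit as soon as the guest becomes interruptible, and clear the control
 * again in the corresponding window VM-exit handler.  Illustrative sketch of
 * the set side only (the exact conditions are handled by the event-evaluation
 * code later in this file):
 *
 * @code
 *     if (!CPUMIsGuestPhysIntrEnabled(pVCpu))
 *         vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);    // exit once EFLAGS.IF opens up
 * @endcode
 */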
4143
4144/**
4145 * Injects an event into the guest upon VM-entry by updating the relevant fields
4146 * in the VM-entry area in the VMCS.
4147 *
4148 * @returns Strict VBox status code (i.e. informational status codes too).
4149 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4150 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4151 *
4152 * @param pVCpu The cross context virtual CPU structure.
4153 * @param pVmcsInfo The VMCS info object.
4154 * @param   fIsNestedGuest  Flag whether this is for a pending nested guest event.
4155 * @param pEvent The event being injected.
4156 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4157 *                          will be updated if necessary. This cannot be NULL.
4158 * @param fStepping Whether we're single-stepping guest execution and should
4159 * return VINF_EM_DBG_STEPPED if the event is injected
4160 * directly (registers modified by us, not by hardware on
4161 * VM-entry).
4162 */
4163static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4164 bool fStepping, uint32_t *pfIntrState)
4165{
4166 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4167 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4168 Assert(pfIntrState);
4169
4170#ifdef IN_NEM_DARWIN
4171 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4172#endif
4173
4174 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4175 uint32_t u32IntInfo = pEvent->u64IntInfo;
4176 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4177 uint32_t const cbInstr = pEvent->cbInstr;
4178 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4179 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4180 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4181
4182#ifdef VBOX_STRICT
4183 /*
4184 * Validate the error-code-valid bit for hardware exceptions.
4185 * No error codes for exceptions in real-mode.
4186 *
4187 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4188 */
4189 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4190 && !CPUMIsGuestInRealModeEx(pCtx))
4191 {
4192 switch (uVector)
4193 {
4194 case X86_XCPT_PF:
4195 case X86_XCPT_DF:
4196 case X86_XCPT_TS:
4197 case X86_XCPT_NP:
4198 case X86_XCPT_SS:
4199 case X86_XCPT_GP:
4200 case X86_XCPT_AC:
4201 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4202 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4203 RT_FALL_THRU();
4204 default:
4205 break;
4206 }
4207 }
4208
4209 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4210 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4211 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4212#endif
4213
4214 RT_NOREF(uVector);
4215 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4216 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4217 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4218 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4219 {
4220 Assert(uVector <= X86_XCPT_LAST);
4221 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4222 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4223 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4224 }
4225 else
4226 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4227
4228 /*
4229 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4230 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4231 * interrupt handler in the (real-mode) guest.
4232 *
4233 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4234 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4235 */
4236 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4237 {
4238#ifndef IN_NEM_DARWIN
4239 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4240#endif
4241 {
4242 /*
4243 * For CPUs with unrestricted guest execution enabled and with the guest
4244 * in real-mode, we must not set the deliver-error-code bit.
4245 *
4246 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4247 */
4248 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4249 }
4250#ifndef IN_NEM_DARWIN
4251 else
4252 {
4253 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4254 Assert(PDMVmmDevHeapIsEnabled(pVM));
4255 Assert(pVM->hm.s.vmx.pRealModeTSS);
4256 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4257
4258 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4259 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4260 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4261 AssertRCReturn(rc2, rc2);
4262
4263 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4264 size_t const cbIdtEntry = sizeof(X86IDTR16);
4265 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4266 {
4267 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4268 if (uVector == X86_XCPT_DF)
4269 return VINF_EM_RESET;
4270
4271 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4272 No error codes for exceptions in real-mode. */
4273 if (uVector == X86_XCPT_GP)
4274 {
4275 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4276 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4277 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4278 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4279 HMEVENT EventXcptDf;
4280 RT_ZERO(EventXcptDf);
4281 EventXcptDf.u64IntInfo = uXcptDfInfo;
4282 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4283 }
4284
4285 /*
4286 * If we're injecting an event with no valid IDT entry, inject a #GP.
4287 * No error codes for exceptions in real-mode.
4288 *
4289 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4290 */
4291 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4292 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4293 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4294 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4295 HMEVENT EventXcptGp;
4296 RT_ZERO(EventXcptGp);
4297 EventXcptGp.u64IntInfo = uXcptGpInfo;
4298 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4299 }
4300
4301 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4302 uint16_t uGuestIp = pCtx->ip;
4303 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4304 {
4305 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4306 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4307 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4308 }
4309 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4310 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4311
4312 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4313 X86IDTR16 IdtEntry;
4314 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4315 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4316 AssertRCReturn(rc2, rc2);
4317
4318 /* Construct the stack frame for the interrupt/exception handler. */
4319 VBOXSTRICTRC rcStrict;
4320 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4321 if (rcStrict == VINF_SUCCESS)
4322 {
4323 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4324 if (rcStrict == VINF_SUCCESS)
4325 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4326 }
4327
4328 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4329 if (rcStrict == VINF_SUCCESS)
4330 {
4331 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4332 pCtx->rip = IdtEntry.offSel;
4333 pCtx->cs.Sel = IdtEntry.uSel;
4334 pCtx->cs.ValidSel = IdtEntry.uSel;
4335 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4336 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4337 && uVector == X86_XCPT_PF)
4338 pCtx->cr2 = GCPtrFault;
4339
4340 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4341 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4342 | HM_CHANGED_GUEST_RSP);
4343
4344 /*
4345 * If we delivered a hardware exception (other than an NMI) and if there was
4346 * block-by-STI in effect, we should clear it.
4347 */
4348 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4349 {
4350 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4351 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4352 Log4Func(("Clearing inhibition due to STI\n"));
4353 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4354 }
4355
4356 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4357 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4358
4359 /*
4360 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4361 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4362 */
4363 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4364
4365 /*
4366 * If we eventually support nested-guest execution without unrestricted guest execution,
4367 * we should set fInterceptEvents here.
4368 */
4369 Assert(!fIsNestedGuest);
4370
4371 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4372 if (fStepping)
4373 rcStrict = VINF_EM_DBG_STEPPED;
4374 }
4375 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4376 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4377 return rcStrict;
4378 }
4379#else
4380 RT_NOREF(pVmcsInfo);
4381#endif
4382 }
4383
4384 /*
4385 * Validate.
4386 */
4387 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4388 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4389
4390 /*
4391 * Inject the event into the VMCS.
4392 */
4393 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4394 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4395 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4396 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4397 AssertRC(rc);
4398
4399 /*
4400 * Update guest CR2 if this is a page-fault.
4401 */
4402 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4403 pCtx->cr2 = GCPtrFault;
4404
4405 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4406 return VINF_SUCCESS;
4407}
4408
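/*
 * Worked example for the real-mode IVT bounds check above: with
 * cbIdtEntry = sizeof(X86IDTR16) = 4, vector 0x08 occupies IVT bytes
 * 0x20..0x23, so "uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt"
 * requires cbIdt to be at least 0x23 for a #DF to have a valid entry;
 * otherwise the code above escalates (#GP -> #DF -> triple fault, i.e.
 * VINF_EM_RESET).
 */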
4409
4410/**
4411 * Evaluates the event to be delivered to the guest and sets it as the pending
4412 * event.
4413 *
4414 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4415 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4416 * NOT restore these force-flags.
4417 *
4418 * @returns Strict VBox status code (i.e. informational status codes too).
4419 * @param pVCpu The cross context virtual CPU structure.
4420 * @param pVmcsInfo The VMCS information structure.
4421 * @param   fIsNestedGuest  Flag whether the evaluation happens for a nested guest.
4422 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4423 */
4424static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4425{
4426 Assert(pfIntrState);
4427 Assert(!TRPMHasTrap(pVCpu));
4428
4429 /*
4430 * Compute/update guest-interruptibility state related FFs.
4431 * The FFs will be used below while evaluating events to be injected.
4432 */
4433 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4434
4435 /*
4436 * Evaluate if a new event needs to be injected.
4437 * An event that's already pending has already performed all necessary checks.
4438 */
4439 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4440 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4441 {
4442 /** @todo SMI. SMIs take priority over NMIs. */
4443
4444 /*
4445 * NMIs.
4446 * NMIs take priority over external interrupts.
4447 */
4448#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4449 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4450#endif
4451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4452 {
4453 /*
4454 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4455 *
4456 * For a nested-guest, the FF always indicates the outer guest's ability to
4457 * receive an NMI while the guest-interruptibility state bit depends on whether
4458 * the nested-hypervisor is using virtual-NMIs.
4459 */
4460 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4461 {
4462#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4463 if ( fIsNestedGuest
4464 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4465 return IEMExecVmxVmexitXcptNmi(pVCpu);
4466#endif
4467 vmxHCSetPendingXcptNmi(pVCpu);
4468 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4469 Log4Func(("NMI pending injection\n"));
4470
4471 /* We've injected the NMI, bail. */
4472 return VINF_SUCCESS;
4473 }
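            /* NMIs are currently blocked (e.g. an earlier NMI is still being handled); arm NMI-window
               exiting so that we get a VM-exit as soon as blocking ends and can inject the NMI then. */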
4474 else if (!fIsNestedGuest)
4475 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4476 }
4477
4478 /*
4479 * External interrupts (PIC/APIC).
4480 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4481 * We cannot re-request the interrupt from the controller again.
4482 */
4483 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4484 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4485 {
4486 Assert(!DBGFIsStepping(pVCpu));
4487 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4488 AssertRC(rc);
4489
4490 /*
4491 * We must not check EFLAGS directly when executing a nested-guest, use
4492 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4493 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4494 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4495 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4496 *
4497 * See Intel spec. 25.4.1 "Event Blocking".
4498 */
4499 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4500 {
4501#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4502 if ( fIsNestedGuest
4503 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4504 {
4505 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4506 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4507 return rcStrict;
4508 }
4509#endif
4510 uint8_t u8Interrupt;
4511 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4512 if (RT_SUCCESS(rc))
4513 {
4514#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4515 if ( fIsNestedGuest
4516 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4517 {
4518 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4519 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4520 return rcStrict;
4521 }
4522#endif
4523 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4524 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4525 }
4526 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4527 {
4528 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4529
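                    /* Raise the TPR threshold to the masked interrupt's priority class (vector bits 7:4),
                       so a TPR-below-threshold VM-exit gives us another shot at delivering it once the
                       guest lowers its TPR far enough. */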
4530 if ( !fIsNestedGuest
4531 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4532 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4533 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4534
4535 /*
4536 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4537 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4538 * need to re-set this force-flag here.
4539 */
4540 }
4541 else
4542 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4543
4544 /* We've injected the interrupt or taken necessary action, bail. */
4545 return VINF_SUCCESS;
4546 }
4547 if (!fIsNestedGuest)
4548 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4549 }
4550 }
4551 else if (!fIsNestedGuest)
4552 {
4553 /*
4554 * An event is being injected or we are in an interrupt shadow. Check if another event is
4555 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4556 * the pending event.
4557 */
4558 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4559 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4560 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4561 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4562 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4563 }
4564 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4565
4566 return VINF_SUCCESS;
4567}
4568
4569
4570/**
4571 * Injects any pending events into the guest if the guest is in a state to
4572 * receive them.
4573 *
4574 * @returns Strict VBox status code (i.e. informational status codes too).
4575 * @param pVCpu The cross context virtual CPU structure.
4576 * @param pVmcsInfo The VMCS information structure.
4577 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4578 * @param fIntrState The VT-x guest-interruptibility state.
4579 * @param fStepping Whether we are single-stepping the guest using the
4580 * hypervisor debugger and should return
4581 * VINF_EM_DBG_STEPPED if the event was dispatched
4582 * directly.
4583 */
4584static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4585 uint32_t fIntrState, bool fStepping)
4586{
4587 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4588#ifndef IN_NEM_DARWIN
4589 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4590#endif
4591
4592#ifdef VBOX_STRICT
4593 /*
4594 * Verify guest-interruptibility state.
4595 *
4596 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4597 * since injecting an event may modify the interruptibility state and we must thus always
4598 * use fIntrState.
4599 */
4600 {
4601 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4602 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4603 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4604 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4605 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4606 Assert(!TRPMHasTrap(pVCpu));
4607 NOREF(fBlockMovSS); NOREF(fBlockSti);
4608 }
4609#endif
4610
4611 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4612 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4613 {
4614 /*
4615 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4616 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4617 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4618 *
4619 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4620 */
4621 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4622#ifdef VBOX_STRICT
4623 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4624 {
4625 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4626 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4627 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4628 }
4629 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4630 {
4631 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4632 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4633 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4634 }
4635#endif
4636 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4637 uIntType));
4638
4639 /*
4640 * Inject the event and get any changes to the guest-interruptibility state.
4641 *
4642 * The guest-interruptibility state may need to be updated if we inject the event
4643 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4644 */
4645 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4646 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4647
4648 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4649 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4650 else
4651 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4652 }
4653
4654 /*
4655 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
 4656 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4657 */
4658 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4659 && !fIsNestedGuest)
4660 {
4661 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4662
4663 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4664 {
4665 /*
4666 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4667 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4668 */
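            /* With the trap flag set, recording a pending BS (single-step) debug exception here lets the
               CPU deliver the #DB the guest expects once the interrupt shadow has passed. */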
4669 Assert(!DBGFIsStepping(pVCpu));
4670 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4671 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4672 AssertRC(rc);
4673 }
4674 else
4675 {
4676 /*
4677 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4678 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4679 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4680 * we use MTF, so just make sure it's called before executing guest-code.
4681 */
4682 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4683 }
4684 }
 4685 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4686
4687 /*
4688 * Finally, update the guest-interruptibility state.
4689 *
4690 * This is required for the real-on-v86 software interrupt injection, for
4691 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4692 */
4693 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4694 AssertRC(rc);
4695
4696 /*
4697 * There's no need to clear the VM-entry interruption-information field here if we're not
4698 * injecting anything. VT-x clears the valid bit on every VM-exit.
4699 *
4700 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4701 */
4702
4703 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4704 return rcStrict;
4705}
4706
4707
4708/**
4709 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4710 * and update error record fields accordingly.
4711 *
4712 * @returns VMX_IGS_* error codes.
4713 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4714 * wrong with the guest state.
4715 *
4716 * @param pVCpu The cross context virtual CPU structure.
4717 * @param pVmcsInfo The VMCS info. object.
4718 *
4719 * @remarks This function assumes our cache of the VMCS controls
4720 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4721 */
4722static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4723{
4724#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4725#define HMVMX_CHECK_BREAK(expr, err) do { \
4726 if (!(expr)) { uError = (err); break; } \
4727 } while (0)
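/* These helpers record the first failed diagnostic in uError and break out of the do-while(0)
   block below; both are #undef'd again at the end of this function. */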
4728
4729 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4730 uint32_t uError = VMX_IGS_ERROR;
4731 uint32_t u32IntrState = 0;
4732#ifndef IN_NEM_DARWIN
4733 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4734 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4735#else
4736 bool const fUnrestrictedGuest = true;
4737#endif
4738 do
4739 {
4740 int rc;
4741
4742 /*
4743 * Guest-interruptibility state.
4744 *
 4745 * Read this first so that, even if a check that does not itself need the
 4746 * guest-interruptibility state fails, the error record still reflects the
 4747 * correct VMCS value and avoids causing further confusion.
4748 */
4749 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4750 AssertRC(rc);
4751
4752 uint32_t u32Val;
4753 uint64_t u64Val;
4754
4755 /*
4756 * CR0.
4757 */
4758 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
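        /* IA32_VMX_CR0_FIXED0 has a 1 for every CR0 bit that must be 1 and IA32_VMX_CR0_FIXED1 has a 0
           for every bit that must be 0; ANDing them yields the must-be-one mask while ORing them yields
           the may-be-one mask (anything outside it must be 0).  The CR4 fixed MSRs below work the same way. */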
4759 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4760 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4761 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4762 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4763 if (fUnrestrictedGuest)
4764 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4765
4766 uint64_t u64GuestCr0;
4767 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4768 AssertRC(rc);
4769 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4770 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4771 if ( !fUnrestrictedGuest
4772 && (u64GuestCr0 & X86_CR0_PG)
4773 && !(u64GuestCr0 & X86_CR0_PE))
4774 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4775
4776 /*
4777 * CR4.
4778 */
4779 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4780 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4781 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4782
4783 uint64_t u64GuestCr4;
4784 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4785 AssertRC(rc);
4786 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4787 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4788
4789 /*
4790 * IA32_DEBUGCTL MSR.
4791 */
4792 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4793 AssertRC(rc);
4794 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4795 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4796 {
4797 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4798 }
4799 uint64_t u64DebugCtlMsr = u64Val;
4800
4801#ifdef VBOX_STRICT
4802 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4803 AssertRC(rc);
4804 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4805#endif
4806 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4807
4808 /*
4809 * RIP and RFLAGS.
4810 */
4811 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4812 AssertRC(rc);
4813 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4814 if ( !fLongModeGuest
4815 || !pCtx->cs.Attr.n.u1Long)
4816 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4817 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4818 * must be identical if the "IA-32e mode guest" VM-entry
4819 * control is 1 and CS.L is 1. No check applies if the
4820 * CPU supports 64 linear-address bits. */
4821
4822 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4823 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4824 AssertRC(rc);
4825 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4826 VMX_IGS_RFLAGS_RESERVED);
4827 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4828 uint32_t const u32Eflags = u64Val;
4829
4830 if ( fLongModeGuest
4831 || ( fUnrestrictedGuest
4832 && !(u64GuestCr0 & X86_CR0_PE)))
4833 {
4834 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4835 }
4836
4837 uint32_t u32EntryInfo;
4838 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4839 AssertRC(rc);
4840 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4841 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4842
4843 /*
4844 * 64-bit checks.
4845 */
4846 if (fLongModeGuest)
4847 {
4848 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4849 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4850 }
4851
4852 if ( !fLongModeGuest
4853 && (u64GuestCr4 & X86_CR4_PCIDE))
4854 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4855
4856 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4857 * 51:32 beyond the processor's physical-address width are 0. */
4858
4859 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4860 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4861 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4862
4863#ifndef IN_NEM_DARWIN
4864 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4865 AssertRC(rc);
4866 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4867
4868 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4869 AssertRC(rc);
4870 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4871#endif
4872
4873 /*
4874 * PERF_GLOBAL MSR.
4875 */
4876 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4877 {
4878 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4879 AssertRC(rc);
4880 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4881 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4882 }
4883
4884 /*
4885 * PAT MSR.
4886 */
4887 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4888 {
4889 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4890 AssertRC(rc);
 4891 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry are reserved (MBZ). */
4892 for (unsigned i = 0; i < 8; i++)
4893 {
4894 uint8_t u8Val = (u64Val & 0xff);
4895 if ( u8Val != 0 /* UC */
4896 && u8Val != 1 /* WC */
4897 && u8Val != 4 /* WT */
4898 && u8Val != 5 /* WP */
4899 && u8Val != 6 /* WB */
4900 && u8Val != 7 /* UC- */)
4901 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4902 u64Val >>= 8;
4903 }
4904 }
4905
4906 /*
4907 * EFER MSR.
4908 */
4909 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4910 {
4911 Assert(g_fHmVmxSupportsVmcsEfer);
4912 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4913 AssertRC(rc);
4914 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4915 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4916 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4917 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4918 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4919 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4920 * iemVmxVmentryCheckGuestState(). */
4921 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4922 || !(u64GuestCr0 & X86_CR0_PG)
4923 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4924 VMX_IGS_EFER_LMA_LME_MISMATCH);
4925 }
4926
4927 /*
4928 * Segment registers.
4929 */
4930 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4931 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4932 if (!(u32Eflags & X86_EFL_VM))
4933 {
4934 /* CS */
4935 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4936 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4937 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4938 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4939 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4940 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4941 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4942 /* CS cannot be loaded with NULL in protected mode. */
4943 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4944 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4945 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4946 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4947 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4948 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4949 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4950 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4951 else
4952 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4953
4954 /* SS */
4955 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4956 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4957 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4958 if ( !(pCtx->cr0 & X86_CR0_PE)
4959 || pCtx->cs.Attr.n.u4Type == 3)
4960 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4961
4962 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4963 {
4964 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4965 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4966 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4967 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4968 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4969 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4970 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4971 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4972 }
4973
4974 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4975 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4976 {
4977 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4978 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4979 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4980 || pCtx->ds.Attr.n.u4Type > 11
4981 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4982 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4983 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4984 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4985 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4986 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4987 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4988 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4989 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4990 }
4991 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4992 {
4993 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4994 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4995 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4996 || pCtx->es.Attr.n.u4Type > 11
4997 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4998 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4999 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5000 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5001 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5002 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5003 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5004 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5005 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5006 }
5007 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5008 {
5009 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5010 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5011 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5012 || pCtx->fs.Attr.n.u4Type > 11
5013 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5014 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5015 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5016 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5017 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5018 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5019 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5020 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5021 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5022 }
5023 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5024 {
5025 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5026 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5027 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5028 || pCtx->gs.Attr.n.u4Type > 11
5029 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5030 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5031 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5032 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5033 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5034 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5035 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5036 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5037 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5038 }
5039 /* 64-bit capable CPUs. */
5040 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5041 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5042 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5043 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5044 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5045 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5046 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5047 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5048 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5049 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5050 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5051 }
5052 else
5053 {
5054 /* V86 mode checks. */
5055 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5056 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5057 {
5058 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5059 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5060 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5061 }
5062 else
5063 {
5064 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5065 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5066 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5067 }
5068
5069 /* CS */
5070 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5071 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5072 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5073 /* SS */
5074 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5075 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5076 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5077 /* DS */
5078 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5079 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5080 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5081 /* ES */
5082 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5083 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5084 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5085 /* FS */
5086 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5087 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5088 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5089 /* GS */
5090 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5091 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5092 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5093 /* 64-bit capable CPUs. */
5094 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5095 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5096 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5097 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5098 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5099 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5100 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5101 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5102 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5103 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5104 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5105 }
5106
5107 /*
5108 * TR.
5109 */
5110 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5111 /* 64-bit capable CPUs. */
5112 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5113 if (fLongModeGuest)
5114 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5115 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5116 else
5117 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5118 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5119 VMX_IGS_TR_ATTR_TYPE_INVALID);
5120 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5121 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5122 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5123 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5124 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5125 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5126 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5127 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5128
5129 /*
5130 * GDTR and IDTR (64-bit capable checks).
5131 */
5132 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5133 AssertRC(rc);
5134 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5135
5136 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5137 AssertRC(rc);
5138 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5139
5140 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5141 AssertRC(rc);
5142 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5143
5144 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5145 AssertRC(rc);
5146 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5147
5148 /*
5149 * Guest Non-Register State.
5150 */
5151 /* Activity State. */
5152 uint32_t u32ActivityState;
5153 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5154 AssertRC(rc);
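        /* Activity states other than 'active' (HLT, shutdown, wait-for-SIPI) are only valid when the
           IA32_VMX_MISC MSR advertises support for them, hence the check against its activity-state bits. */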
5155 HMVMX_CHECK_BREAK( !u32ActivityState
5156 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5157 VMX_IGS_ACTIVITY_STATE_INVALID);
5158 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5159 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5160
5161 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5162 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5163 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5164
5165 /** @todo Activity state and injecting interrupts. Left as a todo since we
 5166 * currently don't use any activity state other than ACTIVE. */
5167
5168 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5169 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5170
5171 /* Guest interruptibility-state. */
5172 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5173 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5174 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5175 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5176 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5177 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5178 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5179 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5180 {
5181 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5182 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5183 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5184 }
5185 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5186 {
5187 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5188 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5189 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5190 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5191 }
5192 /** @todo Assumes the processor is not in SMM. */
5193 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5194 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5195 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5196 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5197 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5198 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5199 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5200 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5201
5202 /* Pending debug exceptions. */
5203 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5204 AssertRC(rc);
5205 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5206 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5207 u32Val = u64Val; /* For pending debug exceptions checks below. */
5208
5209 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5210 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5211 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5212 {
5213 if ( (u32Eflags & X86_EFL_TF)
5214 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5215 {
5216 /* Bit 14 is PendingDebug.BS. */
5217 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5218 }
5219 if ( !(u32Eflags & X86_EFL_TF)
5220 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5221 {
5222 /* Bit 14 is PendingDebug.BS. */
5223 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5224 }
5225 }
5226
5227#ifndef IN_NEM_DARWIN
5228 /* VMCS link pointer. */
5229 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5230 AssertRC(rc);
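        /* A link pointer of all ones means no shadow VMCS is in use; otherwise it must point at our
           shadow VMCS, whose revision ID and shadow-VMCS indicator are verified below. */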
5231 if (u64Val != UINT64_C(0xffffffffffffffff))
5232 {
5233 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5234 /** @todo Bits beyond the processor's physical-address width MBZ. */
5235 /** @todo SMM checks. */
5236 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5237 Assert(pVmcsInfo->pvShadowVmcs);
5238 VMXVMCSREVID VmcsRevId;
5239 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5240 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5241 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5242 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5243 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5244 }
5245
5246 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5247 * not using nested paging? */
5248 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5249 && !fLongModeGuest
5250 && CPUMIsGuestInPAEModeEx(pCtx))
5251 {
5252 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5253 AssertRC(rc);
5254 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5255
5256 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5257 AssertRC(rc);
5258 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5259
5260 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5261 AssertRC(rc);
5262 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5263
5264 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5265 AssertRC(rc);
5266 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5267 }
5268#endif
5269
5270 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5271 if (uError == VMX_IGS_ERROR)
5272 uError = VMX_IGS_REASON_NOT_FOUND;
5273 } while (0);
5274
5275 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5276 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5277 return uError;
5278
5279#undef HMVMX_ERROR_BREAK
5280#undef HMVMX_CHECK_BREAK
5281}
5282
5283
5284#ifndef HMVMX_USE_FUNCTION_TABLE
5285/**
5286 * Handles a guest VM-exit from hardware-assisted VMX execution.
5287 *
5288 * @returns Strict VBox status code (i.e. informational status codes too).
5289 * @param pVCpu The cross context virtual CPU structure.
5290 * @param pVmxTransient The VMX-transient structure.
5291 */
5292DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5293{
5294#ifdef DEBUG_ramshankar
5295# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5296 do { \
5297 if (a_fSave != 0) \
5298 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5299 VBOXSTRICTRC rcStrict = a_CallExpr; \
5300 if (a_fSave != 0) \
5301 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5302 return rcStrict; \
5303 } while (0)
5304#else
5305# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5306#endif
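/* When a_fSave is non-zero, the DEBUG_ramshankar variant of this macro imports the full guest state
   before the exit handler and marks everything as changed afterwards; normal builds simply tail-call
   the handler. */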
5307 uint32_t const uExitReason = pVmxTransient->uExitReason;
5308 switch (uExitReason)
5309 {
5310 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5311 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5312 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5313 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5314 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5315 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5316 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5317 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5318 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5319 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5320 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5321 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5322 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5323 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5324 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5325 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5326 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5327 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5328 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5329 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5330 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5331 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5332 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5333 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5334 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5335 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5336 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5337 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5338 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5339 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5340#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5341 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5342 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5343 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5344 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5345 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
 5346 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
 5347 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5348 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5349 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5350 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5351#else
5352 case VMX_EXIT_VMCLEAR:
5353 case VMX_EXIT_VMLAUNCH:
5354 case VMX_EXIT_VMPTRLD:
5355 case VMX_EXIT_VMPTRST:
5356 case VMX_EXIT_VMREAD:
5357 case VMX_EXIT_VMRESUME:
5358 case VMX_EXIT_VMWRITE:
5359 case VMX_EXIT_VMXOFF:
5360 case VMX_EXIT_VMXON:
5361 case VMX_EXIT_INVVPID:
5362 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5363#endif
5364#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5365 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5366#else
5367 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5368#endif
5369
5370 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5371 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5372 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5373
5374 case VMX_EXIT_INIT_SIGNAL:
5375 case VMX_EXIT_SIPI:
5376 case VMX_EXIT_IO_SMI:
5377 case VMX_EXIT_SMI:
5378 case VMX_EXIT_ERR_MSR_LOAD:
5379 case VMX_EXIT_ERR_MACHINE_CHECK:
5380 case VMX_EXIT_PML_FULL:
5381 case VMX_EXIT_VIRTUALIZED_EOI:
5382 case VMX_EXIT_GDTR_IDTR_ACCESS:
5383 case VMX_EXIT_LDTR_TR_ACCESS:
5384 case VMX_EXIT_APIC_WRITE:
5385 case VMX_EXIT_RDRAND:
5386 case VMX_EXIT_RSM:
5387 case VMX_EXIT_VMFUNC:
5388 case VMX_EXIT_ENCLS:
5389 case VMX_EXIT_RDSEED:
5390 case VMX_EXIT_XSAVES:
5391 case VMX_EXIT_XRSTORS:
5392 case VMX_EXIT_UMWAIT:
5393 case VMX_EXIT_TPAUSE:
5394 case VMX_EXIT_LOADIWKEY:
5395 default:
5396 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5397 }
5398#undef VMEXIT_CALL_RET
5399}
5400#endif /* !HMVMX_USE_FUNCTION_TABLE */
5401
5402
5403#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5404/**
5405 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5406 *
5407 * @returns Strict VBox status code (i.e. informational status codes too).
5408 * @param pVCpu The cross context virtual CPU structure.
5409 * @param pVmxTransient The VMX-transient structure.
5410 */
5411DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5412{
5413 uint32_t const uExitReason = pVmxTransient->uExitReason;
5414 switch (uExitReason)
5415 {
5416# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5417 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5418 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5419# else
5420 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5421 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5422# endif
5423 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5424 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5425 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5426
5427 /*
5428 * We shouldn't direct host physical interrupts to the nested-guest.
5429 */
5430 case VMX_EXIT_EXT_INT:
5431 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5432
5433 /*
 5434 * Instructions that cause VM-exits unconditionally or where the condition is
 5435 * always taken solely from the nested hypervisor (meaning if the VM-exit
5436 * happens, it's guaranteed to be a nested-guest VM-exit).
5437 *
5438 * - Provides VM-exit instruction length ONLY.
5439 */
5440 case VMX_EXIT_CPUID: /* Unconditional. */
5441 case VMX_EXIT_VMCALL:
5442 case VMX_EXIT_GETSEC:
5443 case VMX_EXIT_INVD:
5444 case VMX_EXIT_XSETBV:
5445 case VMX_EXIT_VMLAUNCH:
5446 case VMX_EXIT_VMRESUME:
5447 case VMX_EXIT_VMXOFF:
5448 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5449 case VMX_EXIT_VMFUNC:
5450 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5451
5452 /*
 5453 * Instructions that cause VM-exits unconditionally or where the condition is
 5454 * always taken solely from the nested hypervisor (meaning if the VM-exit
5455 * happens, it's guaranteed to be a nested-guest VM-exit).
5456 *
5457 * - Provides VM-exit instruction length.
5458 * - Provides VM-exit information.
5459 * - Optionally provides Exit qualification.
5460 *
5461 * Since Exit qualification is 0 for all VM-exits where it is not
5462 * applicable, reading and passing it to the guest should produce
5463 * defined behavior.
5464 *
5465 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5466 */
5467 case VMX_EXIT_INVEPT: /* Unconditional. */
5468 case VMX_EXIT_INVVPID:
5469 case VMX_EXIT_VMCLEAR:
5470 case VMX_EXIT_VMPTRLD:
5471 case VMX_EXIT_VMPTRST:
5472 case VMX_EXIT_VMXON:
5473 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5474 case VMX_EXIT_LDTR_TR_ACCESS:
5475 case VMX_EXIT_RDRAND:
5476 case VMX_EXIT_RDSEED:
5477 case VMX_EXIT_XSAVES:
5478 case VMX_EXIT_XRSTORS:
5479 case VMX_EXIT_UMWAIT:
5480 case VMX_EXIT_TPAUSE:
5481 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5482
5483 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5485 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5492 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5494 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5495 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5496 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5497 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5498 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5499 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5500 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5501 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5502
5503 case VMX_EXIT_PREEMPT_TIMER:
5504 {
5505 /** @todo NSTVMX: Preempt timer. */
5506 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5507 }
5508
5509 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5510 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5511
5512 case VMX_EXIT_VMREAD:
5513 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5514
5515 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5516 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5517
5518 case VMX_EXIT_INIT_SIGNAL:
5519 case VMX_EXIT_SIPI:
5520 case VMX_EXIT_IO_SMI:
5521 case VMX_EXIT_SMI:
5522 case VMX_EXIT_ERR_MSR_LOAD:
5523 case VMX_EXIT_ERR_MACHINE_CHECK:
5524 case VMX_EXIT_PML_FULL:
5525 case VMX_EXIT_RSM:
5526 default:
5527 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5528 }
5529}
5530#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5531
5532
5533/** @name VM-exit helpers.
5534 * @{
5535 */
5536/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5537/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5538/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5539
5540/** Macro for VM-exits called unexpectedly. */
5541#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5542 do { \
5543 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5544 return VERR_VMX_UNEXPECTED_EXIT; \
5545 } while (0)
5546
5547#ifdef VBOX_STRICT
5548# ifndef IN_NEM_DARWIN
 5549/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5550# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5551 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5552
5553# define HMVMX_ASSERT_PREEMPT_CPUID() \
5554 do { \
5555 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5556 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5557 } while (0)
5558
5559# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5560 do { \
5561 AssertPtr((a_pVCpu)); \
5562 AssertPtr((a_pVmxTransient)); \
5563 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5564 Assert((a_pVmxTransient)->pVmcsInfo); \
5565 Assert(ASMIntAreEnabled()); \
5566 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5567 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5568 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5569 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5570 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5571 HMVMX_ASSERT_PREEMPT_CPUID(); \
5572 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5573 } while (0)
5574# else
5575# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5576# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5577# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5578 do { \
5579 AssertPtr((a_pVCpu)); \
5580 AssertPtr((a_pVmxTransient)); \
5581 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5582 Assert((a_pVmxTransient)->pVmcsInfo); \
5583 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5584 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5585 } while (0)
5586# endif
5587
5588# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5589 do { \
5590 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5591 Assert((a_pVmxTransient)->fIsNestedGuest); \
5592 } while (0)
5593
5594# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5595 do { \
5596 Log4Func(("\n")); \
5597 } while (0)
5598#else
5599# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5600 do { \
5601 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5602 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5603 } while (0)
5604
5605# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5606 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5607
5608# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5609#endif
5610
5611#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5612/** Macro that does the necessary privilege checks and intercepted VM-exits for
5613 * guests that attempted to execute a VMX instruction. */
5614# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5615 do \
5616 { \
5617 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5618 if (rcStrictTmp == VINF_SUCCESS) \
5619 { /* likely */ } \
5620 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5621 { \
5622 Assert((a_pVCpu)->hm.s.Event.fPending); \
5623 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5624 return VINF_SUCCESS; \
5625 } \
5626 else \
5627 { \
5628 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5629 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5630 } \
5631 } while (0)
5632
5633/** Macro that decodes a memory operand for an VM-exit caused by an instruction. */
5634# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5635 do \
5636 { \
5637 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5638 (a_pGCPtrEffAddr)); \
5639 if (rcStrictTmp == VINF_SUCCESS) \
5640 { /* likely */ } \
5641 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5642 { \
5643 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5644 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5645 NOREF(uXcptTmp); \
5646 return VINF_SUCCESS; \
5647 } \
5648 else \
5649 { \
5650 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5651 return rcStrictTmp; \
5652 } \
5653 } while (0)
5654#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5655
5656
5657/**
5658 * Advances the guest RIP by the specified number of bytes.
5659 *
5660 * @param pVCpu The cross context virtual CPU structure.
5661 * @param cbInstr Number of bytes to advance the RIP by.
5662 *
5663 * @remarks No-long-jump zone!!!
5664 */
5665DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5666{
5667 /* Advance the RIP. */
5668 pVCpu->cpum.GstCtx.rip += cbInstr;
5669 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5670
5671 /* Update interrupt inhibition. */
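    /* The STI / MOV SS shadow only covers the single instruction following it, so once RIP has moved
       past the recorded inhibition address the force-flag no longer applies and can be cleared. */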
5672 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5673 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5674 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5675}
5676
5677
5678/**
5679 * Advances the guest RIP after reading it from the VMCS.
5680 *
5681 * @returns VBox status code, no informational status codes.
5682 * @param pVCpu The cross context virtual CPU structure.
5683 * @param pVmxTransient The VMX-transient structure.
5684 *
5685 * @remarks No-long-jump zone!!!
5686 */
5687static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5688{
5689 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5690 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5691 AssertRCReturn(rc, rc);
5692
5693 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5694 return VINF_SUCCESS;
5695}
5696
5697
5698/**
5699 * Handle a condition that occurred while delivering an event through the guest or
5700 * nested-guest IDT.
5701 *
5702 * @returns Strict VBox status code (i.e. informational status codes too).
5703 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5704 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5705 * to continue execution of the guest which will deliver the \#DF.
5706 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5707 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5708 *
5709 * @param pVCpu The cross context virtual CPU structure.
5710 * @param pVmxTransient The VMX-transient structure.
5711 *
5712 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5713 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5714 * is due to an EPT violation, PML full or SPP-related event.
5715 *
5716 * @remarks No-long-jump zone!!!
5717 */
5718static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5719{
5720 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5721 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5722 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5723 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5724 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5725 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5726
5727 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5728 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5729 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5730 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5731 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5732 {
5733 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5734 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5735
5736 /*
5737 * If the event was a software interrupt (generated with INT n) or a software exception
5738 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5739 * can handle the VM-exit and continue guest execution which will re-execute the
5740 * instruction rather than re-injecting the exception, as that can cause premature
5741 * trips to ring-3 before injection and involve TRPM which currently has no way of
5742 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5743 * the problem).
5744 */
5745 IEMXCPTRAISE enmRaise;
5746 IEMXCPTRAISEINFO fRaiseInfo;
5747 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5748 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5749 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5750 {
5751 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5752 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5753 }
5754 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5755 {
5756 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5757 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5758 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5759
5760 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5761 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5762
5763 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5764
5765 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5766 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5767 {
5768 pVmxTransient->fVectoringPF = true;
5769 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5770 }
5771 }
5772 else
5773 {
5774 /*
5775 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5776 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5777 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5778 */
5779 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5780 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5781 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5782 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5783 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5784 }
5785
5786 /*
5787 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5788 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5789 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5790 * subsequent VM-entry would fail, see @bugref{7445}.
5791 *
5792 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5793 */
5794 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5795 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5796 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5797 && CPUMIsGuestNmiBlocking(pVCpu))
5798 {
5799 CPUMSetGuestNmiBlocking(pVCpu, false);
5800 }
5801
5802 switch (enmRaise)
5803 {
5804 case IEMXCPTRAISE_CURRENT_XCPT:
5805 {
5806 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5807 Assert(rcStrict == VINF_SUCCESS);
5808 break;
5809 }
5810
5811 case IEMXCPTRAISE_PREV_EVENT:
5812 {
5813 uint32_t u32ErrCode;
5814 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5815 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5816 else
5817 u32ErrCode = 0;
5818
5819 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5820 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5821 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5822 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5823
5824 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5825 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5826 Assert(rcStrict == VINF_SUCCESS);
5827 break;
5828 }
5829
5830 case IEMXCPTRAISE_REEXEC_INSTR:
5831 Assert(rcStrict == VINF_SUCCESS);
5832 break;
5833
5834 case IEMXCPTRAISE_DOUBLE_FAULT:
5835 {
5836 /*
5837 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5838 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5839 */
5840 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5841 {
5842 pVmxTransient->fVectoringDoublePF = true;
5843 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5844 pVCpu->cpum.GstCtx.cr2));
5845 rcStrict = VINF_SUCCESS;
5846 }
5847 else
5848 {
5849 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5850 vmxHCSetPendingXcptDF(pVCpu);
5851 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5852 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5853 rcStrict = VINF_HM_DOUBLE_FAULT;
5854 }
5855 break;
5856 }
5857
5858 case IEMXCPTRAISE_TRIPLE_FAULT:
5859 {
5860 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5861 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5862 rcStrict = VINF_EM_RESET;
5863 break;
5864 }
5865
5866 case IEMXCPTRAISE_CPU_HANG:
5867 {
5868 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5869 rcStrict = VERR_EM_GUEST_CPU_HANG;
5870 break;
5871 }
5872
5873 default:
5874 {
5875 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5876 rcStrict = VERR_VMX_IPE_2;
5877 break;
5878 }
5879 }
5880 }
5881 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5882 && !CPUMIsGuestNmiBlocking(pVCpu))
5883 {
5884 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5885 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5886 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5887 {
5888 /*
5889 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5890 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5891 * that virtual NMIs remain blocked until the IRET execution is completed.
5892 *
5893 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5894 */
5895 CPUMSetGuestNmiBlocking(pVCpu, true);
5896 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5897 }
5898 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5899 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5900 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5901 {
5902 /*
5903 * Execution of IRET caused an EPT violation, page-modification log-full event or
5904 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5905 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5906 * that virtual NMIs remain blocked until the IRET execution is completed.
5907 *
5908 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5909 */
5910 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5911 {
5912 CPUMSetGuestNmiBlocking(pVCpu, true);
5913 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5914 }
5915 }
5916 }
5917
5918 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5919 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5920 return rcStrict;
5921}
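
/*
 * Illustrative sketch (not part of the build): a much-simplified model of the
 * benign/contributory/page-fault classification that decides between serial handling,
 * #DF and triple-fault above (the real evaluation is done by IEMEvaluateRecursiveXcpt).
 * Vector numbers follow the Intel SDM double-fault table; the 'example'/'k...' names
 * are invented for this sketch.
 *
 *      #include <stdint.h>
 *
 *      typedef enum { kClass_Benign, kClass_Contributory, kClass_PageFault } EXAMPLEXCPTCLASS;
 *      typedef enum { kAction_HandleSerially, kAction_DoubleFault, kAction_TripleFault } EXAMPLEACTION;
 *
 *      static EXAMPLEXCPTCLASS exampleClassify(uint8_t uVector)
 *      {
 *          if (uVector == 14)                                      // #PF
 *              return kClass_PageFault;
 *          if (uVector == 0 || (uVector >= 10 && uVector <= 13))   // #DE, #TS, #NP, #SS, #GP
 *              return kClass_Contributory;
 *          return kClass_Benign;
 *      }
 *
 *      // uPrev was being delivered through the IDT when uNew was raised.
 *      static EXAMPLEACTION exampleEvaluate(uint8_t uPrev, uint8_t uNew)
 *      {
 *          if (uPrev == 8)                                         // fault while delivering #DF -> shutdown
 *              return kAction_TripleFault;
 *          EXAMPLEXCPTCLASS const enmPrev = exampleClassify(uPrev);
 *          EXAMPLEXCPTCLASS const enmNew  = exampleClassify(uNew);
 *          if (   (enmPrev == kClass_Contributory && enmNew == kClass_Contributory)
 *              || (enmPrev == kClass_PageFault    && enmNew != kClass_Benign))
 *              return kAction_DoubleFault;
 *          return kAction_HandleSerially;
 *      }
 */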
5922
5923
5924#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5925/**
5926 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
5927 * guest attempting to execute a VMX instruction.
5928 *
5929 * @returns Strict VBox status code (i.e. informational status codes too).
5930 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5931 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5932 *
5933 * @param pVCpu The cross context virtual CPU structure.
5934 * @param uExitReason The VM-exit reason.
5935 *
5936 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5937 * @remarks No-long-jump zone!!!
5938 */
5939static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5940{
5941 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5942 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5943
5944 /*
5945 * The physical CPU would have already checked the CPU mode/code segment.
5946 * We shall just assert here for paranoia.
5947 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5948 */
5949 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5950 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5951 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5952
5953 if (uExitReason == VMX_EXIT_VMXON)
5954 {
5955 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5956
5957 /*
5958 * We check CR4.VMXE because it is required to be always set while in VMX operation
5959 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5960 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5961 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5962 */
5963 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5964 {
5965 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5966 vmxHCSetPendingXcptUD(pVCpu);
5967 return VINF_HM_PENDING_XCPT;
5968 }
5969 }
5970 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5971 {
5972 /*
5973 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5974 * (other than VMXON), we need to raise a #UD.
5975 */
5976 Log4Func(("Not in VMX root mode -> #UD\n"));
5977 vmxHCSetPendingXcptUD(pVCpu);
5978 return VINF_HM_PENDING_XCPT;
5979 }
5980
5981 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5982 return VINF_SUCCESS;
5983}
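
/*
 * Illustrative sketch (not part of the build): the #UD pre-check performed above, reduced
 * to its essentials. CR4.VMXE is architecturally bit 13; the 'example*'/'EXAMPLE_*' names
 * and parameters are invented for this sketch.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      #define EXAMPLE_CR4_VMXE   (UINT64_C(1) << 13)
 *
 *      // Returns true if the VMX instruction may proceed to IEM, false if a #UD should be
 *      // injected. VMXON only needs the guest CR4.VMXE bit; every other VMX instruction
 *      // requires the guest to already be in VMX root operation.
 *      static bool exampleVmxInstrPrecheck(uint64_t uGuestCr4, bool fInVmxRootMode, bool fIsVmxon)
 *      {
 *          if (fIsVmxon)
 *              return (uGuestCr4 & EXAMPLE_CR4_VMXE) != 0;
 *          return fInVmxRootMode;
 *      }
 */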
5984
5985
5986/**
5987 * Decodes the memory operand of an instruction that caused a VM-exit.
5988 *
5989 * The Exit qualification field provides the displacement field for memory
5990 * operand instructions, if any.
5991 *
5992 * @returns Strict VBox status code (i.e. informational status codes too).
5993 * @retval VINF_SUCCESS if the operand was successfully decoded.
5994 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5995 * operand.
5996 * @param pVCpu The cross context virtual CPU structure.
5997 * @param uExitInstrInfo The VM-exit instruction information field.
5998 * @param GCPtrDisp The instruction displacement field, if any. For
5999 * RIP-relative addressing pass RIP + displacement here.
6000 * @param enmMemAccess The memory operand's access type (read or write).
6001 * @param pGCPtrMem Where to store the effective destination memory address.
6002 *
6003 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6004 * virtual-8086 mode and hence skips those checks while verifying if the
6005 * segment is valid.
6006 */
6007static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6008 PRTGCPTR pGCPtrMem)
6009{
6010 Assert(pGCPtrMem);
6011 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6012 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6013 | CPUMCTX_EXTRN_CR0);
6014
6015 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6016 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6017 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6018
6019 VMXEXITINSTRINFO ExitInstrInfo;
6020 ExitInstrInfo.u = uExitInstrInfo;
6021 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6022 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6023 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6024 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6025 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6026 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6027 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6028 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6029 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6030
6031 /*
6032 * Validate instruction information.
6033 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6034 */
6035 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6036 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6037 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6038 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6039 AssertLogRelMsgReturn(fIsMemOperand,
6040 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6041
6042 /*
6043 * Compute the complete effective address.
6044 *
6045 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6046 * See AMD spec. 4.5.2 "Segment Registers".
6047 */
6048 RTGCPTR GCPtrMem = GCPtrDisp;
6049 if (fBaseRegValid)
6050 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6051 if (fIdxRegValid)
6052 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6053
6054 RTGCPTR const GCPtrOff = GCPtrMem;
6055 if ( !fIsLongMode
6056 || iSegReg >= X86_SREG_FS)
6057 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6058 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6059
6060 /*
6061 * Validate effective address.
6062 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6063 */
6064 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6065 Assert(cbAccess > 0);
6066 if (fIsLongMode)
6067 {
6068 if (X86_IS_CANONICAL(GCPtrMem))
6069 {
6070 *pGCPtrMem = GCPtrMem;
6071 return VINF_SUCCESS;
6072 }
6073
6074 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6075 * "Data Limit Checks in 64-bit Mode". */
6076 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6077 vmxHCSetPendingXcptGP(pVCpu, 0);
6078 return VINF_HM_PENDING_XCPT;
6079 }
6080
6081 /*
6082 * This is a watered down version of iemMemApplySegment().
6083 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6084 * and segment CPL/DPL checks are skipped.
6085 */
6086 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6087 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6088 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6089
6090 /* Check if the segment is present and usable. */
6091 if ( pSel->Attr.n.u1Present
6092 && !pSel->Attr.n.u1Unusable)
6093 {
6094 Assert(pSel->Attr.n.u1DescType);
6095 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6096 {
6097 /* Check permissions for the data segment. */
6098 if ( enmMemAccess == VMXMEMACCESS_WRITE
6099 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6100 {
6101 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6102 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6103 return VINF_HM_PENDING_XCPT;
6104 }
6105
6106 /* Check limits if it's a normal data segment. */
6107 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6108 {
6109 if ( GCPtrFirst32 > pSel->u32Limit
6110 || GCPtrLast32 > pSel->u32Limit)
6111 {
6112 Log4Func(("Data segment limit exceeded. "
6113 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6114 GCPtrLast32, pSel->u32Limit));
6115 if (iSegReg == X86_SREG_SS)
6116 vmxHCSetPendingXcptSS(pVCpu, 0);
6117 else
6118 vmxHCSetPendingXcptGP(pVCpu, 0);
6119 return VINF_HM_PENDING_XCPT;
6120 }
6121 }
6122 else
6123 {
6124 /* Check limits if it's an expand-down data segment.
6125 Note! The upper boundary is defined by the B bit, not the G bit! */
6126 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6127 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6128 {
6129 Log4Func(("Expand-down data segment limit exceeded. "
6130 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6131 GCPtrLast32, pSel->u32Limit));
6132 if (iSegReg == X86_SREG_SS)
6133 vmxHCSetPendingXcptSS(pVCpu, 0);
6134 else
6135 vmxHCSetPendingXcptGP(pVCpu, 0);
6136 return VINF_HM_PENDING_XCPT;
6137 }
6138 }
6139 }
6140 else
6141 {
6142 /* Check permissions for the code segment. */
6143 if ( enmMemAccess == VMXMEMACCESS_WRITE
6144 || ( enmMemAccess == VMXMEMACCESS_READ
6145 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6146 {
6147 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6148 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6149 vmxHCSetPendingXcptGP(pVCpu, 0);
6150 return VINF_HM_PENDING_XCPT;
6151 }
6152
6153 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6154 if ( GCPtrFirst32 > pSel->u32Limit
6155 || GCPtrLast32 > pSel->u32Limit)
6156 {
6157 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6158 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6159 if (iSegReg == X86_SREG_SS)
6160 vmxHCSetPendingXcptSS(pVCpu, 0);
6161 else
6162 vmxHCSetPendingXcptGP(pVCpu, 0);
6163 return VINF_HM_PENDING_XCPT;
6164 }
6165 }
6166 }
6167 else
6168 {
6169 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6170 vmxHCSetPendingXcptGP(pVCpu, 0);
6171 return VINF_HM_PENDING_XCPT;
6172 }
6173
6174 *pGCPtrMem = GCPtrMem;
6175 return VINF_SUCCESS;
6176}
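
/*
 * Illustrative sketch (not part of the build): the core effective-address arithmetic used
 * above, without the segmentation checks (the real code above only adds the segment base
 * outside long mode or for FS/GS). The canonical test expresses the same 48-bit rule the
 * X86_IS_CANONICAL check above enforces; the 'example*' names are invented for this sketch.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      // Bits 63:47 must be a sign extension of bit 47.
 *      static bool exampleIsCanonical(uint64_t uAddr)
 *      {
 *          uint64_t const uTop = uAddr >> 47;
 *          return uTop == 0 || uTop == UINT64_C(0x1ffff);
 *      }
 *
 *      // addr = seg.base + base + index * 2^scale + disp, truncated to the
 *      // effective address size (16, 32 or 64 bits).
 *      static uint64_t exampleEffectiveAddr(uint64_t uSegBase, uint64_t uBase, uint64_t uIndex,
 *                                           uint8_t uScale, int64_t iDisp, uint8_t cAddrBits)
 *      {
 *          uint64_t const fMask = cAddrBits >= 64 ? UINT64_MAX : (UINT64_C(1) << cAddrBits) - 1;
 *          return (uSegBase + uBase + (uIndex << uScale) + (uint64_t)iDisp) & fMask;
 *      }
 */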
6177#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6178
6179
6180/**
6181 * VM-exit helper for LMSW.
6182 */
6183static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6184{
6185 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6186 AssertRCReturn(rc, rc);
6187
6188 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6189 AssertMsg( rcStrict == VINF_SUCCESS
6190 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6191
6192 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6193 if (rcStrict == VINF_IEM_RAISED_XCPT)
6194 {
6195 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6196 rcStrict = VINF_SUCCESS;
6197 }
6198
6199 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6200 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6201 return rcStrict;
6202}
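
/*
 * Illustrative sketch (not part of the build): what LMSW architecturally does to CR0,
 * i.e. roughly the register-level effect the IEMExecDecodedLmsw call above has to produce,
 * ignoring VMX guest/host masks and nested intercepts. LMSW only touches CR0.PE/MP/EM/TS
 * and can set PE but never clear it. The 'example' name is invented for this sketch.
 *
 *      #include <stdint.h>
 *
 *      static uint64_t exampleApplyLmsw(uint64_t uGuestCr0, uint16_t uMsw)
 *      {
 *          uint64_t uNewCr0 = (uGuestCr0 & ~UINT64_C(0xe))     // keep all but MP/EM/TS
 *                           | (uMsw & UINT64_C(0xe));          // MP/EM/TS from the operand
 *          uNewCr0 |= (uGuestCr0 | uMsw) & UINT64_C(0x1);      // PE is sticky: set-only
 *          return uNewCr0;
 *      }
 */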
6203
6204
6205/**
6206 * VM-exit helper for CLTS.
6207 */
6208static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6209{
6210 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6211 AssertRCReturn(rc, rc);
6212
6213 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6214 AssertMsg( rcStrict == VINF_SUCCESS
6215 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6216
6217 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6218 if (rcStrict == VINF_IEM_RAISED_XCPT)
6219 {
6220 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6221 rcStrict = VINF_SUCCESS;
6222 }
6223
6224 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6225 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6226 return rcStrict;
6227}
6228
6229
6230/**
6231 * VM-exit helper for MOV from CRx (CRx read).
6232 */
6233static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6234{
6235 Assert(iCrReg < 16);
6236 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6237
6238 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6239 AssertRCReturn(rc, rc);
6240
6241 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6242 AssertMsg( rcStrict == VINF_SUCCESS
6243 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6244
6245 if (iGReg == X86_GREG_xSP)
6246 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6247 else
6248 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6249#ifdef VBOX_WITH_STATISTICS
6250 switch (iCrReg)
6251 {
6252 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6253 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6254 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6255 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6256 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6257 }
6258#endif
6259 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6260 return rcStrict;
6261}
6262
6263
6264/**
6265 * VM-exit helper for MOV to CRx (CRx write).
6266 */
6267static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6268{
6269 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6270
6271 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6272 AssertMsg( rcStrict == VINF_SUCCESS
6273 || rcStrict == VINF_IEM_RAISED_XCPT
6274 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6275
6276 switch (iCrReg)
6277 {
6278 case 0:
6279 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6280 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6281 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6282 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6283 break;
6284
6285 case 2:
6286 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6287 /* Nothing to do here, CR2 is not part of the VMCS. */
6288 break;
6289
6290 case 3:
6291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6292 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6293 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6294 break;
6295
6296 case 4:
6297 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6298 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6299#ifndef IN_NEM_DARWIN
6300 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6301 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6302#else
6303 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6304#endif
6305 break;
6306
6307 case 8:
6308 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6309 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6310 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6311 break;
6312
6313 default:
6314 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6315 break;
6316 }
6317
6318 if (rcStrict == VINF_IEM_RAISED_XCPT)
6319 {
6320 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6321 rcStrict = VINF_SUCCESS;
6322 }
6323 return rcStrict;
6324}
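
/*
 * Illustrative sketch (not part of the build): the architectural CR8 <-> APIC TPR
 * relationship, CR8[3:0] mapping onto TPR[7:4], which is why the MOV-to-CR8 case above
 * only flags HM_CHANGED_GUEST_APIC_TPR besides RIP/RFLAGS. The 'example' names are
 * invented for this sketch.
 *
 *      #include <stdint.h>
 *
 *      static uint8_t exampleTprFromCr8(uint64_t uCr8)
 *      {
 *          return (uint8_t)((uCr8 & 0xf) << 4);    // TPR[7:4] = CR8[3:0]
 *      }
 *
 *      static uint64_t exampleCr8FromTpr(uint8_t uTpr)
 *      {
 *          return (uint64_t)(uTpr >> 4) & 0xf;     // CR8[3:0] = TPR[7:4]
 *      }
 */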
6325
6326
6327/**
6328 * VM-exit exception handler for \#PF (Page-fault exception).
6329 *
6330 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6331 */
6332static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6333{
6334 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6335 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6336
6337#ifndef IN_NEM_DARWIN
6338 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6339 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6340 { /* likely */ }
6341 else
6342#endif
6343 {
6344#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6345 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6346#endif
6347 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6348 if (!pVmxTransient->fVectoringDoublePF)
6349 {
6350 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6351 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6352 }
6353 else
6354 {
6355 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6356 Assert(!pVmxTransient->fIsNestedGuest);
6357 vmxHCSetPendingXcptDF(pVCpu);
6358 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6359 }
6360 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6361 return VINF_SUCCESS;
6362 }
6363
6364 Assert(!pVmxTransient->fIsNestedGuest);
6365
6366 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6367 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6368 if (pVmxTransient->fVectoringPF)
6369 {
6370 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6371 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6372 }
6373
6374 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6375 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6376 AssertRCReturn(rc, rc);
6377
6378 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6379 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6380
6381 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6382 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6383
6384 Log4Func(("#PF: rc=%Rrc\n", rc));
6385 if (rc == VINF_SUCCESS)
6386 {
6387 /*
6388 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6389 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6390 */
6391 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6392 TRPMResetTrap(pVCpu);
6393 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6394 return rc;
6395 }
6396
6397 if (rc == VINF_EM_RAW_GUEST_TRAP)
6398 {
6399 if (!pVmxTransient->fVectoringDoublePF)
6400 {
6401 /* It's a guest page fault and needs to be reflected to the guest. */
6402 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6403 TRPMResetTrap(pVCpu);
6404 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6405 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6406 uGstErrorCode, pVmxTransient->uExitQual);
6407 }
6408 else
6409 {
6410 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6411 TRPMResetTrap(pVCpu);
6412 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6413 vmxHCSetPendingXcptDF(pVCpu);
6414 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6415 }
6416
6417 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6418 return VINF_SUCCESS;
6419 }
6420
6421 TRPMResetTrap(pVCpu);
6422 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6423 return rc;
6424}
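
/*
 * Illustrative sketch (not part of the build): the architectural #PF error code bits
 * carried by uExitIntErrorCode above (the faulting linear address is in the Exit
 * qualification). The 'EXAMPLE_*'/'example*' names are invented for this sketch.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      #define EXAMPLE_PF_P    UINT32_C(0x01)  // 0 = not-present page, 1 = protection violation
 *      #define EXAMPLE_PF_RW   UINT32_C(0x02)  // 1 = write access
 *      #define EXAMPLE_PF_US   UINT32_C(0x04)  // 1 = user-mode access
 *      #define EXAMPLE_PF_RSVD UINT32_C(0x08)  // 1 = reserved bit set in a paging structure
 *      #define EXAMPLE_PF_ID   UINT32_C(0x10)  // 1 = instruction fetch
 *
 *      static bool exampleIsUserWriteProtFault(uint32_t uErrCode)
 *      {
 *          uint32_t const fWanted = EXAMPLE_PF_P | EXAMPLE_PF_RW | EXAMPLE_PF_US;
 *          return (uErrCode & fWanted) == fWanted;
 *      }
 */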
6425
6426
6427/**
6428 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6429 *
6430 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6431 */
6432static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6433{
6434 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6435 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6436
6437 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6438 AssertRCReturn(rc, rc);
6439
6440 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6441 {
6442 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6443 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6444
6445 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6446 * provides the VM-exit instruction length. If this causes problems later,
6447 * disassemble the instruction like it's done on AMD-V. */
6448 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6449 AssertRCReturn(rc2, rc2);
6450 return rc;
6451 }
6452
6453 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6454 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6455 return VINF_SUCCESS;
6456}
6457
6458
6459/**
6460 * VM-exit exception handler for \#BP (Breakpoint exception).
6461 *
6462 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6463 */
6464static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6465{
6466 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6467 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6468
6469 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6470 AssertRCReturn(rc, rc);
6471
6472 VBOXSTRICTRC rcStrict;
6473 if (!pVmxTransient->fIsNestedGuest)
6474 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6475 else
6476 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6477
6478 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6479 {
6480 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6481 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6482 rcStrict = VINF_SUCCESS;
6483 }
6484
6485 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6486 return rcStrict;
6487}
6488
6489
6490/**
6491 * VM-exit exception handler for \#AC (Alignment-check exception).
6492 *
6493 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6494 */
6495static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6496{
6497 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6498
6499 /*
6500 * Detect #ACs caused by the host having enabled split-lock detection.
6501 * Emulate such instructions.
6502 */
6503 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6504 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6505 AssertRCReturn(rc, rc);
6506 /** @todo detect split lock in cpu feature? */
6507 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6508 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6509 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6510 || CPUMGetGuestCPL(pVCpu) != 3
6511 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6512 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6513 {
6514 /*
6515 * Check for debug/trace events and import state accordingly.
6516 */
6517 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6518 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6519 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6520#ifndef IN_NEM_DARWIN
6521 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6522#endif
6523 )
6524 {
6525 if (pVM->cCpus == 1)
6526 {
6527#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6528 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6529#else
6530 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6531#endif
6532 AssertRCReturn(rc, rc);
6533 }
6534 }
6535 else
6536 {
6537 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6538 AssertRCReturn(rc, rc);
6539
6540 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6541
6542 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6543 {
6544 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6545 if (rcStrict != VINF_SUCCESS)
6546 return rcStrict;
6547 }
6548 }
6549
6550 /*
6551 * Emulate the instruction.
6552 *
6553 * We have to ignore the LOCK prefix here as we must not retrigger the
6554 * detection on the host. This isn't all that satisfactory, though...
6555 */
6556 if (pVM->cCpus == 1)
6557 {
6558 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6559 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6560
6561 /** @todo For SMP configs we should do a rendezvous here. */
6562 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6563 if (rcStrict == VINF_SUCCESS)
6564#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6565 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6566 HM_CHANGED_GUEST_RIP
6567 | HM_CHANGED_GUEST_RFLAGS
6568 | HM_CHANGED_GUEST_GPRS_MASK
6569 | HM_CHANGED_GUEST_CS
6570 | HM_CHANGED_GUEST_SS);
6571#else
6572 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6573#endif
6574 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6575 {
6576 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6577 rcStrict = VINF_SUCCESS;
6578 }
6579 return rcStrict;
6580 }
6581 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6582 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6583 return VINF_EM_EMULATE_SPLIT_LOCK;
6584 }
6585
6586 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6587 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6588 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6589
6590 /* Re-inject it. We'll detect any nesting before getting here. */
6591 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6592 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6593 return VINF_SUCCESS;
6594}
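
/*
 * Illustrative sketch (not part of the build): the split-lock heuristic used above as a
 * stand-alone predicate. A legacy 486-style alignment check #AC can only be raised with
 * CR0.AM and EFLAGS.AC set at CPL 3 (both flags are architecturally bit 18 of their
 * respective registers); anything else must be a split-lock #AC. The 'example*'/'EXAMPLE_*'
 * names are invented for this sketch.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      #define EXAMPLE_CR0_AM  (UINT64_C(1) << 18)
 *      #define EXAMPLE_EFL_AC  (UINT64_C(1) << 18)
 *
 *      static bool exampleIsSplitLockAc(uint64_t uCr0, uint64_t uEfl, uint8_t uCpl)
 *      {
 *          return !(uCr0 & EXAMPLE_CR0_AM)
 *              || !(uEfl & EXAMPLE_EFL_AC)
 *              || uCpl != 3;
 *      }
 */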
6595
6596
6597/**
6598 * VM-exit exception handler for \#DB (Debug exception).
6599 *
6600 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6601 */
6602static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6603{
6604 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6605 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6606
6607 /*
6608 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6609 */
6610 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6611
6612 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6613 uint64_t const uDR6 = X86_DR6_INIT_VAL
6614 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6615 | X86_DR6_BD | X86_DR6_BS));
6616
6617 int rc;
6618 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6619 if (!pVmxTransient->fIsNestedGuest)
6620 {
6621 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6622
6623 /*
6624 * Prevents stepping twice over the same instruction when the guest is stepping using
6625 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6626 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6627 */
6628 if ( rc == VINF_EM_DBG_STEPPED
6629 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6630 {
6631 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6632 rc = VINF_EM_RAW_GUEST_TRAP;
6633 }
6634 }
6635 else
6636 rc = VINF_EM_RAW_GUEST_TRAP;
6637 Log6Func(("rc=%Rrc\n", rc));
6638 if (rc == VINF_EM_RAW_GUEST_TRAP)
6639 {
6640 /*
6641 * The exception was for the guest. Update DR6, DR7.GD and
6642 * IA32_DEBUGCTL.LBR before forwarding it.
6643 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6644 */
6645#ifndef IN_NEM_DARWIN
6646 VMMRZCallRing3Disable(pVCpu);
6647 HM_DISABLE_PREEMPT(pVCpu);
6648
6649 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6650 pCtx->dr[6] |= uDR6;
6651 if (CPUMIsGuestDebugStateActive(pVCpu))
6652 ASMSetDR6(pCtx->dr[6]);
6653
6654 HM_RESTORE_PREEMPT();
6655 VMMRZCallRing3Enable(pVCpu);
6656#else
6657 /** @todo */
6658#endif
6659
6660 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6661 AssertRCReturn(rc, rc);
6662
6663 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6664 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6665
6666 /* Paranoia. */
6667 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6668 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6669
6670 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6671 AssertRC(rc);
6672
6673 /*
6674 * Raise #DB in the guest.
6675 *
6676 * It is important to reflect exactly what the VM-exit gave us (preserving the
6677 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6678 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6679 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6680 *
6681 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part
6682 * of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6683 */
6684 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6685 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6686 return VINF_SUCCESS;
6687 }
6688
6689 /*
6690 * Not a guest trap, must be a hypervisor related debug event then.
6691 * Update DR6 in case someone is interested in it.
6692 */
6693 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6694 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6695 CPUMSetHyperDR6(pVCpu, uDR6);
6696
6697 return rc;
6698}
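
/*
 * Illustrative sketch (not part of the build): how the DR6 image is composed from the Exit
 * qualification above. For a #DB exit the qualification uses the DR6 bit positions for
 * B0-B3 (bits 0-3), BD (bit 13) and BS (bit 14); the rest comes from the architectural DR6
 * reset pattern. The 'example*'/'EXAMPLE_*' names are invented for this sketch.
 *
 *      #include <stdint.h>
 *
 *      #define EXAMPLE_DR6_INIT_VAL  UINT64_C(0xffff0ff0)
 *      #define EXAMPLE_DR6_EXIT_BITS UINT64_C(0x0000600f)  // B0-B3 | BD | BS
 *
 *      static uint64_t exampleDr6FromExitQual(uint64_t uExitQual)
 *      {
 *          return EXAMPLE_DR6_INIT_VAL | (uExitQual & EXAMPLE_DR6_EXIT_BITS);
 *      }
 */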
6699
6700
6701/**
6702 * Hacks its way around the lovely mesa driver's backdoor accesses.
6703 *
6704 * @sa hmR0SvmHandleMesaDrvGp.
6705 */
6706static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6707{
6708 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6709 RT_NOREF(pCtx);
6710
6711 /* For now we'll just skip the instruction. */
6712 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6713}
6714
6715
6716/**
6717 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6718 * backdoor logging w/o checking what it is running inside.
6719 *
6720 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6721 * backdoor port and magic numbers loaded in registers.
6722 *
6723 * @returns true if it is, false if it isn't.
6724 * @sa hmR0SvmIsMesaDrvGp.
6725 */
6726DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6727{
6728 /* 0xed: IN eAX,dx */
6729 uint8_t abInstr[1];
6730 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6731 return false;
6732
6733 /* Check that it is #GP(0). */
6734 if (pVmxTransient->uExitIntErrorCode != 0)
6735 return false;
6736
6737 /* Check magic and port. */
6738 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6739 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6740 if (pCtx->rax != UINT32_C(0x564d5868))
6741 return false;
6742 if (pCtx->dx != UINT32_C(0x5658))
6743 return false;
6744
6745 /* Flat ring-3 CS. */
6746 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6747 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6748 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6749 if (pCtx->cs.Attr.n.u2Dpl != 3)
6750 return false;
6751 if (pCtx->cs.u64Base != 0)
6752 return false;
6753
6754 /* Check opcode. */
6755 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6756 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6757 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6758 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6759 if (RT_FAILURE(rc))
6760 return false;
6761 if (abInstr[0] != 0xed)
6762 return false;
6763
6764 return true;
6765}
6766
6767
6768/**
6769 * VM-exit exception handler for \#GP (General-protection exception).
6770 *
6771 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6772 */
6773static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6774{
6775 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6776 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6777
6778 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6779 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6780#ifndef IN_NEM_DARWIN
6781 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6782 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6783 { /* likely */ }
6784 else
6785#endif
6786 {
6787#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6788# ifndef IN_NEM_DARWIN
6789 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6790# else
6791 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6792# endif
6793#endif
6794 /*
6795 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6796 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6797 */
6798 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6799 AssertRCReturn(rc, rc);
6800 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6801 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6802
6803 if ( pVmxTransient->fIsNestedGuest
6804 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6805 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6806 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6807 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6808 else
6809 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6810 return rc;
6811 }
6812
6813#ifndef IN_NEM_DARWIN
6814 Assert(CPUMIsGuestInRealModeEx(pCtx));
6815 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6816 Assert(!pVmxTransient->fIsNestedGuest);
6817
6818 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6819 AssertRCReturn(rc, rc);
6820
6821 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6822 if (rcStrict == VINF_SUCCESS)
6823 {
6824 if (!CPUMIsGuestInRealModeEx(pCtx))
6825 {
6826 /*
6827 * The guest is no longer in real-mode, check if we can continue executing the
6828 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6829 */
6830 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6831 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6832 {
6833 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6834 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6835 }
6836 else
6837 {
6838 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6839 rcStrict = VINF_EM_RESCHEDULE;
6840 }
6841 }
6842 else
6843 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6844 }
6845 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6846 {
6847 rcStrict = VINF_SUCCESS;
6848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6849 }
6850 return VBOXSTRICTRC_VAL(rcStrict);
6851#endif
6852}
6853
6854
6855/**
6856 * VM-exit exception handler for \#DE (Divide Error).
6857 *
6858 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6859 */
6860static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6861{
6862 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6863 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6864
6865 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6866 AssertRCReturn(rc, rc);
6867
6868 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
6869 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6870 {
6871 uint8_t cbInstr = 0;
6872 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6873 if (rc2 == VINF_SUCCESS)
6874 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6875 else if (rc2 == VERR_NOT_FOUND)
6876 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6877 else
6878 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6879 }
6880 else
6881 rcStrict = VINF_SUCCESS; /* Do nothing. */
6882
6883 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6884 if (RT_FAILURE(rcStrict))
6885 {
6886 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6887 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6888 rcStrict = VINF_SUCCESS;
6889 }
6890
6891 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6892 return VBOXSTRICTRC_VAL(rcStrict);
6893}
6894
6895
6896/**
6897 * VM-exit exception handler wrapper for all other exceptions that are not handled
6898 * by a specific handler.
6899 *
6900 * This simply re-injects the exception back into the VM without any special
6901 * processing.
6902 *
6903 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6904 */
6905static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6906{
6907 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6908
6909#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6910# ifndef IN_NEM_DARWIN
6911 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6912 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6913 ("uVector=%#x u32XcptBitmap=%#X32\n",
6914 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6915 NOREF(pVmcsInfo);
6916# endif
6917#endif
6918
6919 /*
6920 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6921 * would have been handled while checking exits due to event delivery.
6922 */
6923 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6924
6925#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6926 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6927 AssertRCReturn(rc, rc);
6928 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6929#endif
6930
6931#ifdef VBOX_WITH_STATISTICS
6932 switch (uVector)
6933 {
6934 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6935 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6936 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6937 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6938 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6939 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6940 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNM); break;
6941 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6942 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6943 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6944 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6945 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6946 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6947 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6948 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6949 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6950 default:
6951 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6952 break;
6953 }
6954#endif
6955
6956 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6957 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6958 NOREF(uVector);
6959
6960 /* Re-inject the original exception into the guest. */
6961 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6962 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6963 return VINF_SUCCESS;
6964}
6965
6966
6967/**
6968 * VM-exit exception handler for all exceptions (except NMIs!).
6969 *
6970 * @remarks This may be called for both guests and nested-guests. Take care to not
6971 * make assumptions and avoid doing anything that is not relevant when
6972 * executing a nested-guest (e.g., Mesa driver hacks).
6973 */
6974static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6975{
6976 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6977
6978 /*
6979 * If this VM-exit occurred while delivering an event through the guest IDT, take
6980 * action based on the return code and additional hints (e.g. for page-faults)
6981 * that will be updated in the VMX transient structure.
6982 */
6983 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6984 if (rcStrict == VINF_SUCCESS)
6985 {
6986 /*
6987 * If an exception caused a VM-exit due to delivery of an event, the original
6988 * event may have to be re-injected into the guest. We shall reinject it and
6989 * continue guest execution. However, page-fault is a complicated case and
6990 * needs additional processing done in vmxHCExitXcptPF().
6991 */
6992 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6993 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6994 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6995 || uVector == X86_XCPT_PF)
6996 {
6997 switch (uVector)
6998 {
6999 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7000 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7001 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7002 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7003 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7004 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7005 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7006 default:
7007 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7008 }
7009 }
7010 /* else: inject pending event before resuming guest execution. */
7011 }
7012 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7013 {
7014 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7015 rcStrict = VINF_SUCCESS;
7016 }
7017
7018 return rcStrict;
7019}
7020/** @} */
7021
7022
7023/** @name VM-exit handlers.
7024 * @{
7025 */
7026/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7027/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7028/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7029
7030/**
7031 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7032 */
7033HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7034{
7035 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7036 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7037
7038#ifndef IN_NEM_DARWIN
7039 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7040 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7041 return VINF_SUCCESS;
7042 return VINF_EM_RAW_INTERRUPT;
7043#else
7044 return VINF_SUCCESS;
7045#endif
7046}
7047
7048
7049/**
7050 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7051 * VM-exit.
7052 */
7053HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7054{
7055 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7056 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7057
7058 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7059
7060 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7061 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7062 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7063
7064 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7065 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7066 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7067 NOREF(pVmcsInfo);
7068
7069 VBOXSTRICTRC rcStrict;
7070 switch (uExitIntType)
7071 {
7072#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7073 /*
7074 * Host physical NMIs:
7075 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7076 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7077 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7078 *
7079 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7080 * See Intel spec. 27.5.5 "Updating Non-Register State".
7081 */
7082 case VMX_EXIT_INT_INFO_TYPE_NMI:
7083 {
7084 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7085 break;
7086 }
7087#endif
7088
7089 /*
7090 * Privileged software exceptions (#DB from ICEBP),
7091 * Software exceptions (#BP and #OF),
7092 * Hardware exceptions:
7093 * Process the required exceptions and resume guest execution if possible.
7094 */
7095 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7096 Assert(uVector == X86_XCPT_DB);
7097 RT_FALL_THRU();
7098 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7099 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7100 RT_FALL_THRU();
7101 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7102 {
7103 NOREF(uVector);
7104 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7105 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7106 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7107 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7108
7109 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7110 break;
7111 }
7112
7113 default:
7114 {
7115 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7116 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7117 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7118 break;
7119 }
7120 }
7121
7122 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7123 return rcStrict;
7124}
7125
7126
7127/**
7128 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7129 */
7130HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7131{
7132 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7133
7134 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7135 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7136 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7137
7138 /* Evaluate and deliver pending events and resume guest execution. */
7139 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7140 return VINF_SUCCESS;
7141}
7142
7143
7144/**
7145 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7146 */
7147HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7148{
7149 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7150
7151 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7152 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7153 {
7154 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7155 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7156 }
7157
7158 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7159
7160 /*
7161 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7162 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7163 */
7164 uint32_t fIntrState;
7165 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7166 AssertRC(rc);
7167 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7168 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7169 {
7170 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7171 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7172
7173 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7174 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7175 AssertRC(rc);
7176 }
7177
7178 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7179 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7180
7181 /* Evaluate and deliver pending events and resume guest execution. */
7182 return VINF_SUCCESS;
7183}
7184
7185
7186/**
7187 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7188 */
7189HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7190{
7191 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7192 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7193}
7194
7195
7196/**
7197 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7198 */
7199HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7200{
7201 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7202 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7203}
7204
7205
7206/**
7207 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7208 */
7209HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7210{
7211 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7212
7213 /*
7214 * Get the state we need and update the exit history entry.
7215 */
7216 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7217 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7218
7219 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7220 AssertRCReturn(rc, rc);
7221
7222 VBOXSTRICTRC rcStrict;
7223 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7224 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7225 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
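    /* EMHistoryUpdateFlagsAndTypeAndPC returns an exit record only when EM deems this a
       frequent exit worth probing further; otherwise we simply emulate the single CPUID below. */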
7226 if (!pExitRec)
7227 {
7228 /*
7229 * Regular CPUID instruction execution.
7230 */
7231 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7232 if (rcStrict == VINF_SUCCESS)
7233 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7234 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7235 {
7236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7237 rcStrict = VINF_SUCCESS;
7238 }
7239 }
7240 else
7241 {
7242 /*
7243 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7244 */
7245 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7246 AssertRCReturn(rc2, rc2);
7247
7248 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7249 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7250
7251 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7252 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7253
7254 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7255 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7256 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7257 }
7258 return rcStrict;
7259}
7260
7261
7262/**
7263 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7264 */
7265HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7266{
7267 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7268
7269 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7270 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7271 AssertRCReturn(rc, rc);
7272
7273 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7274 return VINF_EM_RAW_EMULATE_INSTR;
7275
7276 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7277 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7278}
7279
7280
7281/**
7282 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7283 */
7284HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7285{
7286 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7287
7288 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7289 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7290 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7291 AssertRCReturn(rc, rc);
7292
7293 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7294 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7295 {
7296 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7297 we must reset offsetting on VM-entry. See @bugref{6634}. */
7298 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7299 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7301 }
7302 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7303 {
7304 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7305 rcStrict = VINF_SUCCESS;
7306 }
7307 return rcStrict;
7308}
7309
7310
7311/**
7312 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7313 */
7314HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7315{
7316 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7317
7318 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7319 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7320 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7321 AssertRCReturn(rc, rc);
7322
7323 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7324 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7325 {
7326 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7327 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7328 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7329 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7330 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7331 }
7332 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7333 {
7334 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7335 rcStrict = VINF_SUCCESS;
7336 }
7337 return rcStrict;
7338}
7339
7340
7341/**
7342 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7343 */
7344HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7345{
7346 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7347
7348 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7349 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7350 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7351 AssertRCReturn(rc, rc);
7352
7353 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7354 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7355 if (RT_LIKELY(rc == VINF_SUCCESS))
7356 {
7357 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7358 Assert(pVmxTransient->cbExitInstr == 2);
7359 }
7360 else
7361 {
7362 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7363 rc = VERR_EM_INTERPRETER;
7364 }
7365 return rc;
7366}
7367
7368
7369/**
7370 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7371 */
7372HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7373{
7374 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7375
7376 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7377 if (EMAreHypercallInstructionsEnabled(pVCpu))
7378 {
7379 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7380 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7381 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7382 AssertRCReturn(rc, rc);
7383
7384 /* Perform the hypercall. */
7385 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7386 if (rcStrict == VINF_SUCCESS)
7387 {
7388 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7389 AssertRCReturn(rc, rc);
7390 }
7391 else
7392 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7393 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7394 || RT_FAILURE(rcStrict));
7395
7396 /* If the hypercall changes anything other than guest's general-purpose registers,
7397 we would need to reload the guest changed bits here before VM-entry. */
7398 }
7399 else
7400 Log4Func(("Hypercalls not enabled\n"));
7401
7402 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7403 if (RT_FAILURE(rcStrict))
7404 {
7405 vmxHCSetPendingXcptUD(pVCpu);
7406 rcStrict = VINF_SUCCESS;
7407 }
7408
7409 return rcStrict;
7410}
7411
7412
7413/**
7414 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7415 */
7416HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7417{
7418 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7419#ifndef IN_NEM_DARWIN
7420 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7421#endif
7422
7423 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7424 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7425 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7426 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7427 AssertRCReturn(rc, rc);
7428
7429 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7430
7431 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7432 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7433 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7434 {
7435 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7436 rcStrict = VINF_SUCCESS;
7437 }
7438 else
7439 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7440 VBOXSTRICTRC_VAL(rcStrict)));
7441 return rcStrict;
7442}
7443
7444
7445/**
7446 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7447 */
7448HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7449{
7450 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7451
7452 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7453 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7454 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7455 AssertRCReturn(rc, rc);
7456
7457 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7458 if (rcStrict == VINF_SUCCESS)
7459 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7460 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7461 {
7462 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7463 rcStrict = VINF_SUCCESS;
7464 }
7465
7466 return rcStrict;
7467}
7468
7469
7470/**
7471 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7472 */
7473HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7474{
7475 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7476
7477 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7478 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7479 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7480 AssertRCReturn(rc, rc);
7481
7482 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7483 if (RT_SUCCESS(rcStrict))
7484 {
7485 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
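        /* If EM decides execution should continue after the MWAIT, override the status so we
           resume the guest right away instead of going back to ring-3. */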
7486 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7487 rcStrict = VINF_SUCCESS;
7488 }
7489
7490 return rcStrict;
7491}
7492
7493
7494/**
7495 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7496 * VM-exit.
7497 */
7498HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7499{
7500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7501 return VINF_EM_RESET;
7502}
7503
7504
7505/**
7506 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7507 */
7508HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7509{
7510 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7511
7512 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7513 AssertRCReturn(rc, rc);
7514
7515 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7516 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7517 rc = VINF_SUCCESS;
7518 else
7519 rc = VINF_EM_HALT;
7520
7521 if (rc != VINF_SUCCESS)
7522 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7523 return rc;
7524}
7525
7526
7527/**
7528 * VM-exit handler for instructions that result in a \#UD exception delivered to
7529 * the guest.
7530 */
7531HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7532{
7533 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7534 vmxHCSetPendingXcptUD(pVCpu);
7535 return VINF_SUCCESS;
7536}
7537
7538
7539/**
7540 * VM-exit handler for expiry of the VMX-preemption timer.
7541 */
7542HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7543{
7544 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7545
7546 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7547 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7548 Log12(("vmxHCExitPreemptTimer:\n"));
7549
7550 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7551 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7552 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7553 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7554 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7555}
7556
7557
7558/**
7559 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7560 */
7561HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7562{
7563 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7564
7565 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7566 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7567 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7568 AssertRCReturn(rc, rc);
7569
7570 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7571 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7572 : HM_CHANGED_RAISED_XCPT_MASK);
7573
7574#ifndef IN_NEM_DARWIN
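    /* Ring-0 only: if the guest's XCR0 now differs from the host's while CR4.OSXSAVE is set,
       XCR0 needs to be swapped around VM-entry; when that requirement changes, update the flag
       and re-select the start-VM function. */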
7575 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7576 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7577 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7578 {
7579 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7580 hmR0VmxUpdateStartVmFunction(pVCpu);
7581 }
7582#endif
7583
7584 return rcStrict;
7585}
7586
7587
7588/**
7589 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7590 */
7591HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7592{
7593 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7594
7595 /** @todo Enable the new code after finding a reliable guest test-case. */
7596#if 1
7597 return VERR_EM_INTERPRETER;
7598#else
7599 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7600 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7601 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7602 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7603 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7604 AssertRCReturn(rc, rc);
7605
7606 /* Paranoia. Ensure this has a memory operand. */
7607 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7608
7609 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7610 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7611 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7612 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7613
7614 RTGCPTR GCPtrDesc;
7615 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7616
7617 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7618 GCPtrDesc, uType);
7619 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7620 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7621 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7622 {
7623 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7624 rcStrict = VINF_SUCCESS;
7625 }
7626 return rcStrict;
7627#endif
7628}
7629
7630
7631/**
7632 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7633 * VM-exit.
7634 */
7635HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7636{
7637 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7638 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7639 AssertRCReturn(rc, rc);
7640
7641 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7642 if (RT_FAILURE(rc))
7643 return rc;
7644
7645 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7646 NOREF(uInvalidReason);
7647
7648#ifdef VBOX_STRICT
7649 uint32_t fIntrState;
7650 uint64_t u64Val;
7651 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7652 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7653 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7654
7655 Log4(("uInvalidReason %u\n", uInvalidReason));
7656 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7657 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7658 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7659
7660 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7661 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7662 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7663 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7664 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7665 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7666 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7667 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7668 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7669 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7670 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7671 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7672# ifndef IN_NEM_DARWIN
7673 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7674 {
7675 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7676 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7677 }
7678
7679 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7680# endif
7681#endif
7682
7683 return VERR_VMX_INVALID_GUEST_STATE;
7684}
7685
7686/**
7687 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7688 */
7689HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7690{
7691 /*
7692 * Cumulative notes of all recognized but unexpected VM-exits.
7693 *
7694 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7695 * nested-paging is used.
7696 *
7697 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7698 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7699 * this function (and thereby stop VM execution) for handling such instructions.
7700 *
7701 *
7702 * VMX_EXIT_INIT_SIGNAL:
7703 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7704 * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
7705 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7706 *
7707 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7708 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7709 * See Intel spec. "23.8 Restrictions on VMX operation".
7710 *
7711 * VMX_EXIT_SIPI:
7712 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7713 * activity state is used. We don't make use of it as our guests don't have direct
7714 * access to the host local APIC.
7715 *
7716 * See Intel spec. 25.3 "Other Causes of VM-exits".
7717 *
7718 * VMX_EXIT_IO_SMI:
7719 * VMX_EXIT_SMI:
7720 * This can only happen if we support dual-monitor treatment of SMI, which can be
7721 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7722 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7723 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7724 *
7725 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7726 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7727 *
7728 * VMX_EXIT_ERR_MSR_LOAD:
7729 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7730 * and typically indicate a bug in the hypervisor code. We thus cannot resume
7731 * execution.
7732 *
7733 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7734 *
7735 * VMX_EXIT_ERR_MACHINE_CHECK:
7736 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7737 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7738 * abort-class #MC exception is raised. We thus cannot assume a
7739 * reasonable chance of continuing any sort of execution and we bail.
7740 *
7741 * See Intel spec. 15.1 "Machine-check Architecture".
7742 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7743 *
7744 * VMX_EXIT_PML_FULL:
7745 * VMX_EXIT_VIRTUALIZED_EOI:
7746 * VMX_EXIT_APIC_WRITE:
7747 * We do not currently support any of these features and thus they are all unexpected
7748 * VM-exits.
7749 *
7750 * VMX_EXIT_GDTR_IDTR_ACCESS:
7751 * VMX_EXIT_LDTR_TR_ACCESS:
7752 * VMX_EXIT_RDRAND:
7753 * VMX_EXIT_RSM:
7754 * VMX_EXIT_VMFUNC:
7755 * VMX_EXIT_ENCLS:
7756 * VMX_EXIT_RDSEED:
7757 * VMX_EXIT_XSAVES:
7758 * VMX_EXIT_XRSTORS:
7759 * VMX_EXIT_UMWAIT:
7760 * VMX_EXIT_TPAUSE:
7761 * VMX_EXIT_LOADIWKEY:
7762 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7763 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7764 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7765 *
7766 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7767 */
7768 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7769 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7770 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7771}
7772
7773
7774/**
7775 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7776 */
7777HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7778{
7779 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7780
7781 /** @todo Optimize this: We currently drag in the whole MSR state
7782 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7783 * MSRs required. That would require changes to IEM and possibly CPUM too.
7784 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
7785 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7786 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7787 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
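    /* The FS/GS base MSRs are not covered by CPUMCTX_EXTRN_ALL_MSRS; pull in the full segment
       registers for those (see the matching comment in the WRMSR handler below). */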
7788 switch (idMsr)
7789 {
7790 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7791 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7792 }
7793
7794 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7795 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7796 AssertRCReturn(rc, rc);
7797
7798 Log4Func(("ecx=%#RX32\n", idMsr));
7799
7800#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7801 Assert(!pVmxTransient->fIsNestedGuest);
7802 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7803 {
7804 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7805 && idMsr != MSR_K6_EFER)
7806 {
7807 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7808 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7809 }
7810 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7811 {
7812 Assert(pVmcsInfo->pvMsrBitmap);
7813 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7814 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7815 {
7816 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7817 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7818 }
7819 }
7820 }
7821#endif
7822
7823 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7824 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7825 if (rcStrict == VINF_SUCCESS)
7826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7827 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7828 {
7829 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7830 rcStrict = VINF_SUCCESS;
7831 }
7832 else
7833 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7834 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7835
7836 return rcStrict;
7837}
7838
7839
7840/**
7841 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7842 */
7843HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7844{
7845 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7846
7847 /** @todo Optimize this: We currently drag in the whole MSR state
7848 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7849 * MSRs required. That would require changes to IEM and possibly CPUM too.
7850 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
7851 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7852 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7853
7854 /*
7855 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7856 * Although we don't need to fetch the base (it will be overwritten shortly), when
7857 * loading the guest state we also load the entire segment register, including the limit
7858 * and attributes, and thus we need to import them here.
7859 */
7860 switch (idMsr)
7861 {
7862 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7863 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7864 }
7865
7866 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7867 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7868 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7869 AssertRCReturn(rc, rc);
7870
7871 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7872
7873 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7874 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7875
7876 if (rcStrict == VINF_SUCCESS)
7877 {
7878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7879
7880 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7881 if ( idMsr == MSR_IA32_APICBASE
7882 || ( idMsr >= MSR_IA32_X2APIC_START
7883 && idMsr <= MSR_IA32_X2APIC_END))
7884 {
7885 /*
7886 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7887 * When full APIC register virtualization is implemented we'll have to make
7888 * sure APIC state is saved from the VMCS before IEM changes it.
7889 */
7890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7891 }
7892 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7893 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7894 else if (idMsr == MSR_K6_EFER)
7895 {
7896 /*
7897 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7898 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7899 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7900 */
7901 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7902 }
7903
7904 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7905 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7906 {
7907 switch (idMsr)
7908 {
7909 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7910 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7911 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7912 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7913 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7914 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7915 default:
7916 {
7917#ifndef IN_NEM_DARWIN
7918 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7919 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7920 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7921 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7922#else
7923 AssertMsgFailed(("TODO\n"));
7924#endif
7925 break;
7926 }
7927 }
7928 }
7929#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7930 else
7931 {
7932 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7933 switch (idMsr)
7934 {
7935 case MSR_IA32_SYSENTER_CS:
7936 case MSR_IA32_SYSENTER_EIP:
7937 case MSR_IA32_SYSENTER_ESP:
7938 case MSR_K8_FS_BASE:
7939 case MSR_K8_GS_BASE:
7940 {
7941 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7942 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7943 }
7944
7945 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7946 default:
7947 {
7948 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7949 {
7950 /* EFER MSR writes are always intercepted. */
7951 if (idMsr != MSR_K6_EFER)
7952 {
7953 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7954 idMsr));
7955 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7956 }
7957 }
7958
7959 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7960 {
7961 Assert(pVmcsInfo->pvMsrBitmap);
7962 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7963 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7964 {
7965 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7966 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7967 }
7968 }
7969 break;
7970 }
7971 }
7972 }
7973#endif /* VBOX_STRICT */
7974 }
7975 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7976 {
7977 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7978 rcStrict = VINF_SUCCESS;
7979 }
7980 else
7981 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7982 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7983
7984 return rcStrict;
7985}
7986
7987
7988/**
7989 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7990 */
7991HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7992{
7993 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7994
7995 /** @todo The guest has likely hit a contended spinlock. We might want to
7996 * poke or schedule a different guest VCPU. */
7997 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7998 if (RT_SUCCESS(rc))
7999 return VINF_EM_RAW_INTERRUPT;
8000
8001 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8002 return rc;
8003}
8004
8005
8006/**
8007 * VM-exit handler for when the TPR value is lowered below the specified
8008 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8009 */
8010HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8011{
8012 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8013 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8014
8015 /*
8016 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8017 * We'll re-evaluate pending interrupts and inject them before the next VM
8018 * entry so we can just continue execution here.
8019 */
8020 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8021 return VINF_SUCCESS;
8022}
8023
8024
8025/**
8026 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8027 * VM-exit.
8028 *
8029 * @retval VINF_SUCCESS when guest execution can continue.
8030 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8031 * @retval VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8032 * incompatible guest state for VMX execution (real-on-v86 case).
8033 */
8034HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8035{
8036 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8037 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8038
8039 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8040 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8041 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8042
8043 VBOXSTRICTRC rcStrict;
8044 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8045 uint64_t const uExitQual = pVmxTransient->uExitQual;
8046 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
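    /* The access type in the exit qualification distinguishes MOV-to-CRx, MOV-from-CRx, CLTS
       and LMSW; each gets its own handling below. */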
8047 switch (uAccessType)
8048 {
8049 /*
8050 * MOV to CRx.
8051 */
8052 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8053 {
8054 /*
8055 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8056 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8057 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8058 * PAE PDPTEs as well.
8059 */
8060 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8061 AssertRCReturn(rc, rc);
8062
8063 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8064#ifndef IN_NEM_DARWIN
8065 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8066#endif
8067 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8068 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8069
8070 /*
8071 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8072 * - When nested paging isn't used.
8073 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8074 * - We are executing in the VM debug loop.
8075 */
8076#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8077# ifndef IN_NEM_DARWIN
8078 Assert( iCrReg != 3
8079 || !VM_IS_VMX_NESTED_PAGING(pVM)
8080 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8081 || pVCpu->hmr0.s.fUsingDebugLoop);
8082# else
8083 Assert( iCrReg != 3
8084 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8085# endif
8086#endif
8087
8088 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8089 Assert( iCrReg != 8
8090 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8091
8092 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8093 AssertMsg( rcStrict == VINF_SUCCESS
8094 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8095
8096#ifndef IN_NEM_DARWIN
8097 /*
8098 * This is a kludge for handling switches back to real mode when we try to use
8099 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8100 * deal with special selector values, so we have to return to ring-3 and run
8101 * there till the selector values are V86 mode compatible.
8102 *
8103 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8104 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8105 * this function.
8106 */
8107 if ( iCrReg == 0
8108 && rcStrict == VINF_SUCCESS
8109 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8110 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8111 && (uOldCr0 & X86_CR0_PE)
8112 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8113 {
8114 /** @todo Check selectors rather than returning all the time. */
8115 Assert(!pVmxTransient->fIsNestedGuest);
8116 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8117 rcStrict = VINF_EM_RESCHEDULE_REM;
8118 }
8119#endif
8120
8121 break;
8122 }
8123
8124 /*
8125 * MOV from CRx.
8126 */
8127 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8128 {
8129 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8130 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8131
8132 /*
8133 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8134 * - When nested paging isn't used.
8135 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8136 * - We are executing in the VM debug loop.
8137 */
8138#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8139# ifndef IN_NEM_DARWIN
8140 Assert( iCrReg != 3
8141 || !VM_IS_VMX_NESTED_PAGING(pVM)
8142 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8143 || pVCpu->hmr0.s.fLeaveDone);
8144# else
8145 Assert( iCrReg != 3
8146 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8147# endif
8148#endif
8149
8150 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8151 Assert( iCrReg != 8
8152 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8153
8154 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8155 break;
8156 }
8157
8158 /*
8159 * CLTS (Clear Task-Switch Flag in CR0).
8160 */
8161 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8162 {
8163 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8164 break;
8165 }
8166
8167 /*
8168 * LMSW (Load Machine-Status Word into CR0).
8169 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8170 */
8171 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8172 {
8173 RTGCPTR GCPtrEffDst;
8174 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8175 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8176 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8177 if (fMemOperand)
8178 {
8179 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8180 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8181 }
8182 else
8183 GCPtrEffDst = NIL_RTGCPTR;
8184 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8185 break;
8186 }
8187
8188 default:
8189 {
8190 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8191 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8192 }
8193 }
8194
8195 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8196 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8197 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8198
8199 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8200 NOREF(pVM);
8201 return rcStrict;
8202}
8203
8204
8205/**
8206 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8207 * VM-exit.
8208 */
8209HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8210{
8211 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8212 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8213
8214 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8215 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8216 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8217 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8218 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8219 | CPUMCTX_EXTRN_EFER);
8220 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8221 AssertRCReturn(rc, rc);
8222
8223 /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8224 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8225 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8226 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8227 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8228 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8229 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8230 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
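    /* The exit-qualification size field encodes the access width: 0 = 1 byte, 1 = 2 bytes,
       3 = 4 bytes; 2 is not a valid encoding (cf. the s_aIOSizes table below). */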
8231
8232 /*
8233 * Update exit history to see if this exit can be optimized.
8234 */
8235 VBOXSTRICTRC rcStrict;
8236 PCEMEXITREC pExitRec = NULL;
8237 if ( !fGstStepping
8238 && !fDbgStepping)
8239 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8240 !fIOString
8241 ? !fIOWrite
8242 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8243 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8244 : !fIOWrite
8245 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8246 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8247 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8248 if (!pExitRec)
8249 {
8250 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8251 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8252
8253 uint32_t const cbValue = s_aIOSizes[uIOSize];
8254 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8255 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8256 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8257 if (fIOString)
8258 {
8259 /*
8260 * INS/OUTS - I/O String instruction.
8261 *
8262 * Use instruction-information if available, otherwise fall back on
8263 * interpreting the instruction.
8264 */
8265 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8266 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8267 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8268 if (fInsOutsInfo)
8269 {
8270 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8271 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8272 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8273 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8274 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8275 if (fIOWrite)
8276 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8277 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8278 else
8279 {
8280 /*
8281 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8282 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8283 * See Intel Instruction spec. for "INS".
8284 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8285 */
8286 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8287 }
8288 }
8289 else
8290 rcStrict = IEMExecOne(pVCpu);
8291
8292 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8293 fUpdateRipAlready = true;
8294 }
8295 else
8296 {
8297 /*
8298 * IN/OUT - I/O instruction.
8299 */
8300 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8301 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8302 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8303 if (fIOWrite)
8304 {
8305 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8306 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8307#ifndef IN_NEM_DARWIN
8308 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8309 && !pCtx->eflags.Bits.u1TF)
8310 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8311#endif
8312 }
8313 else
8314 {
8315 uint32_t u32Result = 0;
8316 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8317 if (IOM_SUCCESS(rcStrict))
8318 {
8319 /* Save result of I/O IN instr. in AL/AX/EAX. */
8320 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8321 }
8322#ifndef IN_NEM_DARWIN
8323 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8324 && !pCtx->eflags.Bits.u1TF)
8325 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8326#endif
8327 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8328 }
8329 }
8330
8331 if (IOM_SUCCESS(rcStrict))
8332 {
8333 if (!fUpdateRipAlready)
8334 {
8335 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8336 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8337 }
8338
8339 /*
8340 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8341 * meditation while booting a Fedora 17 64-bit guest.
8342 *
8343 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8344 */
8345 if (fIOString)
8346 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8347
8348 /*
8349 * If any I/O breakpoints are armed, we need to check if one triggered
8350 * and take appropriate action.
8351 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8352 */
8353 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8354 AssertRCReturn(rc, rc);
8355
8356 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8357 * execution engines about whether hyper BPs and such are pending. */
8358 uint32_t const uDr7 = pCtx->dr[7];
8359 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8360 && X86_DR7_ANY_RW_IO(uDr7)
8361 && (pCtx->cr4 & X86_CR4_DE))
8362 || DBGFBpIsHwIoArmed(pVM)))
8363 {
8364 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8365
8366#ifndef IN_NEM_DARWIN
8367 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8368 VMMRZCallRing3Disable(pVCpu);
8369 HM_DISABLE_PREEMPT(pVCpu);
8370
8371 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8372
8373 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8374 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8375 {
8376 /* Raise #DB. */
8377 if (fIsGuestDbgActive)
8378 ASMSetDR6(pCtx->dr[6]);
8379 if (pCtx->dr[7] != uDr7)
8380 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8381
8382 vmxHCSetPendingXcptDB(pVCpu);
8383 }
8384 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8385 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8386 else if ( rcStrict2 != VINF_SUCCESS
8387 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8388 rcStrict = rcStrict2;
8389 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8390
8391 HM_RESTORE_PREEMPT();
8392 VMMRZCallRing3Enable(pVCpu);
8393#else
8394 /** @todo */
8395#endif
8396 }
8397 }
8398
8399#ifdef VBOX_STRICT
8400 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8401 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8402 Assert(!fIOWrite);
8403 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8404 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8405 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8406 Assert(fIOWrite);
8407 else
8408 {
8409# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8410 * statuses, that the VMM device and some others may return. See
8411 * IOM_SUCCESS() for guidance. */
8412 AssertMsg( RT_FAILURE(rcStrict)
8413 || rcStrict == VINF_SUCCESS
8414 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8415 || rcStrict == VINF_EM_DBG_BREAKPOINT
8416 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8417 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8418# endif
8419 }
8420#endif
8421 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8422 }
8423 else
8424 {
8425 /*
8426 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8427 */
8428 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8429 AssertRCReturn(rc2, rc2);
8430 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8431 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8432 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8433 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8434 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8435 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8436
8437 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8438 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8439
8440 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8441 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8442 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8443 }
8444 return rcStrict;
8445}
8446
8447
8448/**
8449 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8450 * VM-exit.
8451 */
8452HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8453{
8454 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8455
8456 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8457 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8458 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8459 {
8460 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8461 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8462 {
8463 uint32_t uErrCode;
8464 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8465 {
8466 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8467 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8468 }
8469 else
8470 uErrCode = 0;
8471
8472 RTGCUINTPTR GCPtrFaultAddress;
8473 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8474 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8475 else
8476 GCPtrFaultAddress = 0;
8477
8478 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8479
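            /* Re-queue the event whose delivery was interrupted by the task switch; returning
               VINF_EM_RAW_INJECT_TRPM_EVENT below hands it back to the caller for re-injection. */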
8480 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8481 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8482
8483 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8484 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8485 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8486 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8487 }
8488 }
8489
8490 /* Fall back to the interpreter to emulate the task-switch. */
8491 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8492 return VERR_EM_INTERPRETER;
8493}
8494
8495
8496/**
8497 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8498 */
8499HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8500{
8501 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8502
8503 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8504 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8505 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8506 AssertRC(rc);
8507 return VINF_EM_DBG_STEPPED;
8508}
8509
8510
8511/**
8512 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8513 */
8514HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8515{
8516 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8517 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8518
8519 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8520 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8521 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8522 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8523 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8524
8525 /*
8526 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8527 */
8528 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8529 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8530 {
8531 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8532 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8533 {
8534 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8535 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8536 }
8537 }
8538 else
8539 {
8540 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8541 return rcStrict;
8542 }
8543
8544 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8545 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8546 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8547 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8548 AssertRCReturn(rc, rc);
8549
8550 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8551 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8552 switch (uAccessType)
8553 {
8554#ifndef IN_NEM_DARWIN
8555 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8556 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8557 {
8558 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8559 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8560 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8561
8562 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8563 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8564 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8565 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8566 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8567
8568 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8569 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8570 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8571 if ( rcStrict == VINF_SUCCESS
8572 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8573 || rcStrict == VERR_PAGE_NOT_PRESENT)
8574 {
8575 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8576 | HM_CHANGED_GUEST_APIC_TPR);
8577 rcStrict = VINF_SUCCESS;
8578 }
8579 break;
8580 }
8581#else
8582 /** @todo */
8583#endif
8584
8585 default:
8586 {
8587 Log4Func(("uAccessType=%#x\n", uAccessType));
8588 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8589 break;
8590 }
8591 }
8592
8593 if (rcStrict != VINF_SUCCESS)
8594 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8595 return rcStrict;
8596}
8597
8598
8599/**
8600 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8601 * VM-exit.
8602 */
8603HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8604{
8605 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8606 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8607
8608 /*
8609 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8610 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8611 * must emulate the MOV DRx access.
8612 */
8613 if (!pVmxTransient->fIsNestedGuest)
8614 {
8615 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8616 if (pVmxTransient->fWasGuestDebugStateActive)
8617 {
8618 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8619 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8620 }
8621
8622 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8623 && !pVmxTransient->fWasHyperDebugStateActive)
8624 {
8625 Assert(!DBGFIsStepping(pVCpu));
8626 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8627
8628 /* Don't intercept MOV DRx any more. */
8629 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8630 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8631 AssertRC(rc);
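            /* From here on, guest MOV DRx accesses no longer cause VM-exits; the guest debug
               state is loaded onto the CPU below so the restarted instruction operates on the
               real debug registers. */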
8632
8633#ifndef IN_NEM_DARWIN
8634 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8635 VMMRZCallRing3Disable(pVCpu);
8636 HM_DISABLE_PREEMPT(pVCpu);
8637
8638 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8639 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8640 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8641
8642 HM_RESTORE_PREEMPT();
8643 VMMRZCallRing3Enable(pVCpu);
8644#else
8645 CPUMR3NemActivateGuestDebugState(pVCpu);
8646 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8647 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8648#endif
8649
8650#ifdef VBOX_WITH_STATISTICS
8651 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8652 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8653 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8654 else
8655 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8656#endif
8657 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8658 return VINF_SUCCESS;
8659 }
8660 }
8661
8662 /*
8663     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8664 * The EFER MSR is always up-to-date.
8665 * Update the segment registers and DR7 from the CPU.
8666 */
8667 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8668 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8669 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8670 AssertRCReturn(rc, rc);
8671 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8672
8673 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8674 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8675 {
8676 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8677 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8678 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8679 if (RT_SUCCESS(rc))
8680 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8681 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8682 }
8683 else
8684 {
8685 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8686 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8687 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8688 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8689 }
8690
8691 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8692 if (RT_SUCCESS(rc))
8693 {
8694 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8695 AssertRCReturn(rc2, rc2);
8696 return VINF_SUCCESS;
8697 }
8698 return rc;
8699}
8700
8701
8702/**
8703 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8704 * Conditional VM-exit.
8705 */
8706HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8707{
8708 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8709
8710#ifndef IN_NEM_DARWIN
8711 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8712
8713 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8714 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8715 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8716 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8717 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8718
8719 /*
8720 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8721 */
8722 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8723 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8724 {
8725 /*
8726 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8727 * instruction emulation to inject the original event. Otherwise, injecting the original event
8728 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8729 */
8730 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8731 { /* likely */ }
8732 else
8733 {
8734 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8735#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8736 /** @todo NSTVMX: Think about how this should be handled. */
8737 if (pVmxTransient->fIsNestedGuest)
8738 return VERR_VMX_IPE_3;
8739#endif
8740 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8741 }
8742 }
8743 else
8744 {
8745 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8746 return rcStrict;
8747 }
8748
8749 /*
8750 * Get sufficient state and update the exit history entry.
8751 */
8752 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8753 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8754 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8755 AssertRCReturn(rc, rc);
8756
8757 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8758 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8759 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8760 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
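    /* No exit record means this RIP is not considered hot; handle the MMIO access directly
       below. Otherwise EMHistoryExec takes over to probe/execute the frequently hit code. */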
8761 if (!pExitRec)
8762 {
8763 /*
8764 * If we succeed, resume guest execution.
8765 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8766 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8767 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8768 * weird case. See @bugref{6043}.
8769 */
8770 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8771 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8772/** @todo bird: We can probably just go straight to IOM here and assume that
8773 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8774 * well.  However, we need to address the aliasing workarounds that
8775 * PGMR0Trap0eHandlerNPMisconfig implements.  So, some care is needed.
8776 *
8777 * Might also be interesting to see if we can get this done more or
8778 * less locklessly inside IOM. Need to consider the lookup table
8779 * updating and use a bit more carefully first (or do all updates via
8780 * rendezvous) */
8781 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8782 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8783 if ( rcStrict == VINF_SUCCESS
8784 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8785 || rcStrict == VERR_PAGE_NOT_PRESENT)
8786 {
8787 /* Successfully handled MMIO operation. */
8788 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8789 | HM_CHANGED_GUEST_APIC_TPR);
8790 rcStrict = VINF_SUCCESS;
8791 }
8792 }
8793 else
8794 {
8795 /*
8796 * Frequent exit or something needing probing. Call EMHistoryExec.
8797 */
8798 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8799 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8800
8801 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8803
8804 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8805 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8806 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8807 }
8808 return rcStrict;
8809#else
8810 AssertFailed();
8811 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8812#endif
8813}
8814
8815
8816/**
8817 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8818 * VM-exit.
8819 */
8820HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8821{
8822 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8823#ifndef IN_NEM_DARWIN
8824 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8825
8826 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8827 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8828 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8829 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8830 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8831 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8832
8833 /*
8834 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8835 */
8836 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8837 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8838 {
8839 /*
8840 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8841 * we shall resolve the nested #PF and re-inject the original event.
8842 */
8843 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8844 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8845 }
8846 else
8847 {
8848 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8849 return rcStrict;
8850 }
8851
8852 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8853 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8854 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8855 AssertRCReturn(rc, rc);
8856
8857 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8858 uint64_t const uExitQual = pVmxTransient->uExitQual;
8859 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8860
8861 RTGCUINT uErrorCode = 0;
8862 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8863 uErrorCode |= X86_TRAP_PF_ID;
8864 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8865 uErrorCode |= X86_TRAP_PF_RW;
8866 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8867 uErrorCode |= X86_TRAP_PF_P;
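   /* Illustration: a guest write through an EPT entry that grants some access (translation
      present) yields uErrorCode = X86_TRAP_PF_RW | X86_TRAP_PF_P above, while an instruction
      fetch through an EPT entry with no access at all yields just X86_TRAP_PF_ID. */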
8868
8869 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8870 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8871
8872 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8873
8874 /*
8875 * Handle the pagefault trap for the nested shadow table.
8876 */
8877 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8878 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8879 TRPMResetTrap(pVCpu);
8880
8881 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8882 if ( rcStrict == VINF_SUCCESS
8883 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8884 || rcStrict == VERR_PAGE_NOT_PRESENT)
8885 {
8886 /* Successfully synced our nested page tables. */
8887 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8888 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8889 return VINF_SUCCESS;
8890 }
8891#else
8892 PVM pVM = pVCpu->CTX_SUFF(pVM);
8893 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8894 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8895 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8896 vmxHCImportGuestRip(pVCpu);
8897 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8898
8899 /*
8900 * Ask PGM for information about the given GCPhys. We need to check if we're
8901 * out of sync first.
8902 */
8903 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8904 PGMPHYSNEMPAGEINFO Info;
8905 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8906 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8907 if (RT_SUCCESS(rc))
8908 {
8909 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8910 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8911 {
8912 if (State.fCanResume)
8913 {
8914 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8915 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8916 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8917 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8918 State.fDidSomething ? "" : " no-change"));
8919 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8920 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8921 return VINF_SUCCESS;
8922 }
8923 }
8924
8925 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8926 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8927 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8928 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8929 State.fDidSomething ? "" : " no-change"));
8930 }
8931 else
8932 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8933 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8934 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8935
8936 /*
8937 * Emulate the memory access, either access handler or special memory.
8938 */
8939 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8940 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8941 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8942 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8943 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8944
8945 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8946 AssertRCReturn(rc, rc);
8947
8948 VBOXSTRICTRC rcStrict;
8949 if (!pExitRec)
8950 rcStrict = IEMExecOne(pVCpu);
8951 else
8952 {
8953 /* Frequent access or probing. */
8954 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8955 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8956 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8957 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8958 }
8959
8960 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8961#endif
8962
8963 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8964 return rcStrict;
8965}
8966
8967
8968#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
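/*
 * Note: the VMX-instruction VM-exit handlers below generally follow the same pattern:
 * read the VM-exit instruction length/info/qualification from the VMCS, import the
 * guest state IEM needs, invoke the matching IEMExecDecodedXxx helper (passing a
 * VMXVEXITINFO when the instruction takes a memory operand), and finally fold
 * VINF_IEM_RAISED_XCPT into VINF_SUCCESS while marking HM_CHANGED_RAISED_XCPT_MASK
 * where applicable (VMLAUNCH/VMRESUME instead assert that case cannot happen).
 */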
8969/**
8970 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8971 */
8972HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8973{
8974 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8975
8976 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8977 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8978 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8979 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8980 | CPUMCTX_EXTRN_HWVIRT
8981 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8982 AssertRCReturn(rc, rc);
8983
8984 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8985
8986 VMXVEXITINFO ExitInfo;
8987 RT_ZERO(ExitInfo);
8988 ExitInfo.uReason = pVmxTransient->uExitReason;
8989 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8990 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8991 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8992 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8993
8994 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8995 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8996 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8997 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8998 {
8999 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9000 rcStrict = VINF_SUCCESS;
9001 }
9002 return rcStrict;
9003}
9004
9005
9006/**
9007 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9008 */
9009HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9010{
9011 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9012
9013    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMLAUNCH;
9014       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9015 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9016 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9017 AssertRCReturn(rc, rc);
9018
9019 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9020
9021 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9022 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9023 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9024 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9025 {
9026 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9027 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9028 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9029 }
9030 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9031 return rcStrict;
9032}
9033
9034
9035/**
9036 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9037 */
9038HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9039{
9040 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9041
9042 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9043 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9044 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9045 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9046 | CPUMCTX_EXTRN_HWVIRT
9047 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9048 AssertRCReturn(rc, rc);
9049
9050 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9051
9052 VMXVEXITINFO ExitInfo;
9053 RT_ZERO(ExitInfo);
9054 ExitInfo.uReason = pVmxTransient->uExitReason;
9055 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9056 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9057 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9058 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9059
9060 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9063 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9064 {
9065 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9066 rcStrict = VINF_SUCCESS;
9067 }
9068 return rcStrict;
9069}
9070
9071
9072/**
9073 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9074 */
9075HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9076{
9077 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9078
9079 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9080 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9081 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9082 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9083 | CPUMCTX_EXTRN_HWVIRT
9084 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9085 AssertRCReturn(rc, rc);
9086
9087 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9088
9089 VMXVEXITINFO ExitInfo;
9090 RT_ZERO(ExitInfo);
9091 ExitInfo.uReason = pVmxTransient->uExitReason;
9092 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9093 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9094 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9095 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9096
9097 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9099 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9100 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9101 {
9102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9103 rcStrict = VINF_SUCCESS;
9104 }
9105 return rcStrict;
9106}
9107
9108
9109/**
9110 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9111 */
9112HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9113{
9114 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9115
9116 /*
9117     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9118     * thus might not need to import the shadow VMCS state, but it's safer just in case
9119     * code elsewhere dares to look at unsynced VMCS fields.
9120 */
9121 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9122 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9123 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9124 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9125 | CPUMCTX_EXTRN_HWVIRT
9126 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9127 AssertRCReturn(rc, rc);
9128
9129 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9130
9131 VMXVEXITINFO ExitInfo;
9132 RT_ZERO(ExitInfo);
9133 ExitInfo.uReason = pVmxTransient->uExitReason;
9134 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9135 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9136 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9137 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9138 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9139
9140 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9141 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9142 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9143 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9144 {
9145 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9146 rcStrict = VINF_SUCCESS;
9147 }
9148 return rcStrict;
9149}
9150
9151
9152/**
9153 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9154 */
9155HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9156{
9157 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9158
9159    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9160       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9161 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9162 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9163 AssertRCReturn(rc, rc);
9164
9165 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9166
9167 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9168 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9169 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9170 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9171 {
9172 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9173 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9174 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9175 }
9176 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9177 return rcStrict;
9178}
9179
9180
9181/**
9182 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9183 */
9184HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9185{
9186 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9187
9188 /*
9189 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9190 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9191 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9192 */
9193 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9194 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9195 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9196 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9197 | CPUMCTX_EXTRN_HWVIRT
9198 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9199 AssertRCReturn(rc, rc);
9200
9201 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9202
9203 VMXVEXITINFO ExitInfo;
9204 RT_ZERO(ExitInfo);
9205 ExitInfo.uReason = pVmxTransient->uExitReason;
9206 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9207 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9208 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9209 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9210 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9211
9212 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9213 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9214 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9215 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9216 {
9217 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9218 rcStrict = VINF_SUCCESS;
9219 }
9220 return rcStrict;
9221}
9222
9223
9224/**
9225 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9226 */
9227HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9228{
9229 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9230
9231 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9232 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9233 | CPUMCTX_EXTRN_HWVIRT
9234 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9235 AssertRCReturn(rc, rc);
9236
9237 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9238
9239 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9240 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9241 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9242 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9243 {
9244 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9245 rcStrict = VINF_SUCCESS;
9246 }
9247 return rcStrict;
9248}
9249
9250
9251/**
9252 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9253 */
9254HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9255{
9256 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9257
9258 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9259 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9260 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9261 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9262 | CPUMCTX_EXTRN_HWVIRT
9263 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9264 AssertRCReturn(rc, rc);
9265
9266 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9267
9268 VMXVEXITINFO ExitInfo;
9269 RT_ZERO(ExitInfo);
9270 ExitInfo.uReason = pVmxTransient->uExitReason;
9271 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9272 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9273 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9274 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9275
9276 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9277 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9278 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9279 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9280 {
9281 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9282 rcStrict = VINF_SUCCESS;
9283 }
9284 return rcStrict;
9285}
9286
9287
9288/**
9289 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9290 */
9291HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9292{
9293 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9294
9295 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9296 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9297 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9298 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9299 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9300 AssertRCReturn(rc, rc);
9301
9302 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9303
9304 VMXVEXITINFO ExitInfo;
9305 RT_ZERO(ExitInfo);
9306 ExitInfo.uReason = pVmxTransient->uExitReason;
9307 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9308 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9309 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9310 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9311
9312 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9313 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9314 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9315 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9316 {
9317 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9318 rcStrict = VINF_SUCCESS;
9319 }
9320 return rcStrict;
9321}
9322
9323
9324# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9325/**
9326 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9327 */
9328HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9329{
9330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9331
9332 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9333 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9334 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9335 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9336 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9337 AssertRCReturn(rc, rc);
9338
9339 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9340
9341 VMXVEXITINFO ExitInfo;
9342 RT_ZERO(ExitInfo);
9343 ExitInfo.uReason = pVmxTransient->uExitReason;
9344 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9345 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9346 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9347 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9348
9349 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9351 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9352 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9353 {
9354 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9355 rcStrict = VINF_SUCCESS;
9356 }
9357 return rcStrict;
9358}
9359# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9360#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9361/** @} */
9362
9363
9364#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9365/** @name Nested-guest VM-exit handlers.
9366 * @{
9367 */
9368/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9369/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9370/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
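/*
 * Note: these nested-guest handlers share a common shape: query the nested hypervisor's
 * VMCS controls (CPUMIsGuestVmx*Set and friends) to see whether it intercepts the exit
 * cause; if it does, reflect the VM-exit to it via the IEMExecVmxVmexit* helpers,
 * otherwise fall back to the regular guest exit handler further up in this file.
 */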
9371
9372/**
9373 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9374 * Conditional VM-exit.
9375 */
9376HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9377{
9378 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9379
9380 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9381
9382 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9383 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9384 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9385
9386 switch (uExitIntType)
9387 {
9388#ifndef IN_NEM_DARWIN
9389 /*
9390 * Physical NMIs:
9391         * We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
9392 */
9393 case VMX_EXIT_INT_INFO_TYPE_NMI:
9394 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9395#endif
9396
9397 /*
9398 * Hardware exceptions,
9399 * Software exceptions,
9400 * Privileged software exceptions:
9401 * Figure out if the exception must be delivered to the guest or the nested-guest.
9402 */
9403 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9404 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9405 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9406 {
9407 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9408 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9409 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9410 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9411
9412 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9413 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9414 pVmxTransient->uExitIntErrorCode);
9415 if (fIntercept)
9416 {
9417 /* Exit qualification is required for debug and page-fault exceptions. */
9418 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9419
9420 /*
9421 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9422 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9423 * length. However, if delivery of a software interrupt, software exception or privileged
9424 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9425 */
9426 VMXVEXITINFO ExitInfo;
9427 RT_ZERO(ExitInfo);
9428 ExitInfo.uReason = pVmxTransient->uExitReason;
9429 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9430 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9431
9432 VMXVEXITEVENTINFO ExitEventInfo;
9433 RT_ZERO(ExitEventInfo);
9434 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9435 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9436 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9437 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9438
9439#ifdef DEBUG_ramshankar
9440 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9441 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9442 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9443 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9444 {
9445 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9446 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9447 }
9448#endif
9449 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9450 }
9451
9452        /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9453 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9454 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9455 }
9456
9457 /*
9458 * Software interrupts:
9459 * VM-exits cannot be caused by software interrupts.
9460 *
9461 * External interrupts:
9462 * This should only happen when "acknowledge external interrupts on VM-exit"
9463 * control is set. However, we never set this when executing a guest or
9464 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9465 * the guest.
9466 */
9467 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9468 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9469 default:
9470 {
9471 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9472 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9473 }
9474 }
9475}
9476
9477
9478/**
9479 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9480 * Unconditional VM-exit.
9481 */
9482HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9483{
9484 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9485 return IEMExecVmxVmexitTripleFault(pVCpu);
9486}
9487
9488
9489/**
9490 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9491 */
9492HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9493{
9494 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9495
9496 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9497 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9498 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9499}
9500
9501
9502/**
9503 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9504 */
9505HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9506{
9507 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9508
9509 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9510 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9511 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9512}
9513
9514
9515/**
9516 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9517 * Unconditional VM-exit.
9518 */
9519HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9520{
9521 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9522
9523 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9524 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9525 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9526 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9527
9528 VMXVEXITINFO ExitInfo;
9529 RT_ZERO(ExitInfo);
9530 ExitInfo.uReason = pVmxTransient->uExitReason;
9531 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9532 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9533
9534 VMXVEXITEVENTINFO ExitEventInfo;
9535 RT_ZERO(ExitEventInfo);
9536 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9537 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9538 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9539}
9540
9541
9542/**
9543 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9544 */
9545HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9546{
9547 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9548
9549 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9550 {
9551 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9552 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9553 }
9554 return vmxHCExitHlt(pVCpu, pVmxTransient);
9555}
9556
9557
9558/**
9559 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9560 */
9561HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9562{
9563 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9564
9565 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9566 {
9567 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9568 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9569
9570 VMXVEXITINFO ExitInfo;
9571 RT_ZERO(ExitInfo);
9572 ExitInfo.uReason = pVmxTransient->uExitReason;
9573 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9574 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9575 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9576 }
9577 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9578}
9579
9580
9581/**
9582 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9583 */
9584HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9585{
9586 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9587
9588 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9589 {
9590 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9591 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9592 }
9593 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9594}
9595
9596
9597/**
9598 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9599 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9600 */
9601HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9602{
9603 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9604
9605 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9606 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9607
9608 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9609
9610 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9611 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9612 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9613
9614 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9615 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9616 u64VmcsField &= UINT64_C(0xffffffff);
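    /* The masking above mirrors the 32-bit operand size VMREAD/VMWRITE use outside long
       mode: only the low 32 bits of the register hold the VMCS field encoding. */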
9617
9618 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9619 {
9620 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9621 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9622
9623 VMXVEXITINFO ExitInfo;
9624 RT_ZERO(ExitInfo);
9625 ExitInfo.uReason = pVmxTransient->uExitReason;
9626 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9627 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9628 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9629 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9630 }
9631
9632 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9633 return vmxHCExitVmread(pVCpu, pVmxTransient);
9634 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9635}
9636
9637
9638/**
9639 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9640 */
9641HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9642{
9643 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9644
9645 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9646 {
9647 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9648 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9649 }
9650
9651 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9652}
9653
9654
9655/**
9656 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9657 * Conditional VM-exit.
9658 */
9659HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9660{
9661 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9662
9663 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9664 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9665
9666 VBOXSTRICTRC rcStrict;
9667 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9668 switch (uAccessType)
9669 {
9670 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9671 {
9672 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9673 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9674 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9675 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9676
9677 bool fIntercept;
9678 switch (iCrReg)
9679 {
9680 case 0:
9681 case 4:
9682 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9683 break;
9684
9685 case 3:
9686 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9687 break;
9688
9689 case 8:
9690 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9691 break;
9692
9693 default:
9694 fIntercept = false;
9695 break;
9696 }
9697 if (fIntercept)
9698 {
9699 VMXVEXITINFO ExitInfo;
9700 RT_ZERO(ExitInfo);
9701 ExitInfo.uReason = pVmxTransient->uExitReason;
9702 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9703 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9704 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9705 }
9706 else
9707 {
9708 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9709 AssertRCReturn(rc, rc);
9710 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9711 }
9712 break;
9713 }
9714
9715 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9716 {
9717 /*
9718 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9719 * CR2 reads do not cause a VM-exit.
9720 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9721 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9722 */
9723 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9724 if ( iCrReg == 3
9725 || iCrReg == 8)
9726 {
9727 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9728 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9729 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9730 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9731 {
9732 VMXVEXITINFO ExitInfo;
9733 RT_ZERO(ExitInfo);
9734 ExitInfo.uReason = pVmxTransient->uExitReason;
9735 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9736 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9737 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9738 }
9739 else
9740 {
9741 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9742 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9743 }
9744 }
9745 else
9746 {
9747 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9748 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9749 }
9750 break;
9751 }
9752
9753 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9754 {
9755 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9756 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9757 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
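            /* CLTS is reflected to the nested hypervisor only when it owns CR0.TS (guest/host
               mask) and the TS bit is set in the CR0 read shadow; otherwise we handle CLTS
               for the guest below. */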
9758 if ( (uGstHostMask & X86_CR0_TS)
9759 && (uReadShadow & X86_CR0_TS))
9760 {
9761 VMXVEXITINFO ExitInfo;
9762 RT_ZERO(ExitInfo);
9763 ExitInfo.uReason = pVmxTransient->uExitReason;
9764 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9765 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9766 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9767 }
9768 else
9769 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9770 break;
9771 }
9772
9773 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9774 {
9775 RTGCPTR GCPtrEffDst;
9776 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9777 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9778 if (fMemOperand)
9779 {
9780 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9781 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9782 }
9783 else
9784 GCPtrEffDst = NIL_RTGCPTR;
9785
9786 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9787 {
9788 VMXVEXITINFO ExitInfo;
9789 RT_ZERO(ExitInfo);
9790 ExitInfo.uReason = pVmxTransient->uExitReason;
9791 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9792 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9793 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9794 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9795 }
9796 else
9797 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9798 break;
9799 }
9800
9801 default:
9802 {
9803 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9804 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9805 }
9806 }
9807
9808 if (rcStrict == VINF_IEM_RAISED_XCPT)
9809 {
9810 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9811 rcStrict = VINF_SUCCESS;
9812 }
9813 return rcStrict;
9814}
9815
9816
9817/**
9818 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9819 * Conditional VM-exit.
9820 */
9821HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9822{
9823 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9824
9825 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9826 {
9827 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9828 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9829
9830 VMXVEXITINFO ExitInfo;
9831 RT_ZERO(ExitInfo);
9832 ExitInfo.uReason = pVmxTransient->uExitReason;
9833 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9834 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9835 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9836 }
9837 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9838}
9839
9840
9841/**
9842 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9843 * Conditional VM-exit.
9844 */
9845HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9846{
9847 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9848
9849 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9850
9851 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9852 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9853 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9854
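   /* Exit-qualification size encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is not a
      valid encoding, hence the assertion above and the 0 placeholder in the table below. */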
9855 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9856 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9857 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9858 {
9859 /*
9860 * IN/OUT instruction:
9861 * - Provides VM-exit instruction length.
9862 *
9863 * INS/OUTS instruction:
9864 * - Provides VM-exit instruction length.
9865 * - Provides Guest-linear address.
9866 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9867 */
9868 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9869 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9870
9871        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9872 pVmxTransient->ExitInstrInfo.u = 0;
9873 pVmxTransient->uGuestLinearAddr = 0;
9874
9875 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9876 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9877 if (fIOString)
9878 {
9879 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9880 if (fVmxInsOutsInfo)
9881 {
9882 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9883 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9884 }
9885 }
9886
9887 VMXVEXITINFO ExitInfo;
9888 RT_ZERO(ExitInfo);
9889 ExitInfo.uReason = pVmxTransient->uExitReason;
9890 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9891 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9892 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9893 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9894 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9895 }
9896 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9897}
9898
9899
9900/**
9901 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9902 */
9903HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9904{
9905 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9906
9907 uint32_t fMsrpm;
9908 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9909 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9910 else
9911 fMsrpm = VMXMSRPM_EXIT_RD;
9912
9913 if (fMsrpm & VMXMSRPM_EXIT_RD)
9914 {
9915 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9916 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9917 }
9918 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9919}
9920
9921
9922/**
9923 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9924 */
9925HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9926{
9927 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9928
9929 uint32_t fMsrpm;
9930 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9931 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9932 else
9933 fMsrpm = VMXMSRPM_EXIT_WR;
9934
9935 if (fMsrpm & VMXMSRPM_EXIT_WR)
9936 {
9937 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9938 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9939 }
9940 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9941}
9942
9943
9944/**
9945 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9946 */
9947HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9948{
9949 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9950
9951 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9952 {
9953 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9954 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9955 }
9956 return vmxHCExitMwait(pVCpu, pVmxTransient);
9957}
9958
9959
9960/**
9961 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9962 * VM-exit.
9963 */
9964HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9965{
9966 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9967
9968 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9969 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
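    /* MTF is a trap-like VM-exit: there is no exit qualification, but the guest's pending
       debug exceptions (read above) are supplied to the nested hypervisor. */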
9970 VMXVEXITINFO ExitInfo;
9971 RT_ZERO(ExitInfo);
9972 ExitInfo.uReason = pVmxTransient->uExitReason;
9973 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9974 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9975}
9976
9977
9978/**
9979 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9980 */
9981HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9982{
9983 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9984
9985 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9986 {
9987 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9988 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9989 }
9990 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9991}
9992
9993
9994/**
9995 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9996 */
9997HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9998{
9999 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10000
10001 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10002 * PAUSE when executing a nested-guest? If it does not, we would not need
10003 * to check for the intercepts here. Just call VM-exit... */
10004
10005 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10006 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10007 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10008 {
10009 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10010 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10011 }
10012 return vmxHCExitPause(pVCpu, pVmxTransient);
10013}
10014
10015
10016/**
10017 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10018 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10019 */
10020HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10021{
10022 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10023
10024 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10025 {
10026 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
10027 VMXVEXITINFO ExitInfo;
10028 RT_ZERO(ExitInfo);
10029 ExitInfo.uReason = pVmxTransient->uExitReason;
10030 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
10031 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10032 }
10033 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10034}
10035
10036
10037/**
10038 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10039 * VM-exit.
10040 */
10041HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10042{
10043 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10044
10045 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10046 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10047 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10048 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10049
10050 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10051
10052 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10053 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10054
10055 VMXVEXITINFO ExitInfo;
10056 RT_ZERO(ExitInfo);
10057 ExitInfo.uReason = pVmxTransient->uExitReason;
10058 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10059 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10060
10061 VMXVEXITEVENTINFO ExitEventInfo;
10062 RT_ZERO(ExitEventInfo);
10063 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10064 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10065 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10066}
10067
10068
10069/**
10070 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10071 * Conditional VM-exit.
10072 */
10073HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10074{
10075 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10076
10077 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10078 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10079 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10080}
10081
10082
10083/**
10084 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10085 * Conditional VM-exit.
10086 */
10087HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10088{
10089 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10090
10091 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10092 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10093 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10094}
10095
10096
10097/**
10098 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10099 */
10100HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10101{
10102 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10103
10104 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10105 {
10106 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10107 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10108 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10109 }
10110 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10111}
10112
10113
10114/**
10115 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10116 */
10117HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10118{
10119 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10120
10121 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10122 {
10123 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10124 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10125 }
10126 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10127}
10128
10129
10130/**
10131 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10132 */
10133HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10134{
10135 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10136
10137 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10138 {
10139 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10140 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10141 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10142 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10143
10144 VMXVEXITINFO ExitInfo;
10145 RT_ZERO(ExitInfo);
10146 ExitInfo.uReason = pVmxTransient->uExitReason;
10147 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10148 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10149 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10150 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10151 }
10152 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10153}
10154
10155
10156/**
10157 * Nested-guest VM-exit handler for invalid guest state
10158 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10159 */
10160HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10161{
10162 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10163
10164 /*
10165 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10166 * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10167 * Handle it as if the outer guest were in an invalid guest state.
10168 *
10169 * When the fast path is implemented, this should be changed to cause the corresponding
10170 * nested-guest VM-exit.
10171 */
10172 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10173}
10174
10175
10176/**
10177 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10178 * and only provide the instruction length.
10179 *
10180 * Unconditional VM-exit.
10181 */
10182HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10183{
10184 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10185
10186#ifdef VBOX_STRICT
10187 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10188 switch (pVmxTransient->uExitReason)
10189 {
10190 case VMX_EXIT_ENCLS:
10191 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10192 break;
10193
10194 case VMX_EXIT_VMFUNC:
10195 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10196 break;
10197 }
10198#endif
10199
10200 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10201 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10202}
10203
10204
10205/**
10206 * Nested-guest VM-exit handler for instructions that provide instruction length as
10207 * well as more information.
10208 *
10209 * Unconditional VM-exit.
10210 */
10211HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10212{
10213 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10214
10215#ifdef VBOX_STRICT
10216 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10217 switch (pVmxTransient->uExitReason)
10218 {
10219 case VMX_EXIT_GDTR_IDTR_ACCESS:
10220 case VMX_EXIT_LDTR_TR_ACCESS:
10221 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10222 break;
10223
10224 case VMX_EXIT_RDRAND:
10225 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10226 break;
10227
10228 case VMX_EXIT_RDSEED:
10229 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10230 break;
10231
10232 case VMX_EXIT_XSAVES:
10233 case VMX_EXIT_XRSTORS:
10234 /** @todo NSTVMX: Verify XSS-bitmap. */
10235 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10236 break;
10237
10238 case VMX_EXIT_UMWAIT:
10239 case VMX_EXIT_TPAUSE:
10240 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10241 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10242 break;
10243
10244 case VMX_EXIT_LOADIWKEY:
10245 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10246 break;
10247 }
10248#endif
10249
10250 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10251 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10252 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10253
10254 VMXVEXITINFO ExitInfo;
10255 RT_ZERO(ExitInfo);
10256 ExitInfo.uReason = pVmxTransient->uExitReason;
10257 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10258 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10259 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10260 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10261}
10262
10263
10264# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10265/**
10266 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10267 * Conditional VM-exit.
10268 */
10269HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10270{
10271 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10272 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10273
10274 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10275 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10276 {
10277 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10278 AssertRCReturn(rc, rc);
10279
10280 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10281 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10282 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10283
10284 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10285 uint64_t const uExitQual = pVmxTransient->uExitQual;
10286
10287 RTGCPTR GCPtrNested;
10288 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10289 if (fIsLinearAddrValid)
10290 {
10291 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10292 GCPtrNested = pVmxTransient->uGuestLinearAddr;
10293 }
10294 else
10295 GCPtrNested = 0;
10296
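        /* Fold the EPT-violation qualification into x86 page-fault error-code bits:
           ID/RW from the attempted access, P if the EPT entry allowed any access at all. */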
10297 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10298 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10299 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10300 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10301 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10302
10303 PGMPTWALK Walk;
10304 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10305 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
10306 fIsLinearAddrValid, GCPtrNested, &Walk);
10307 if (RT_SUCCESS(rcStrict))
10308 {
10309 if (rcStrict == VINF_SUCCESS)
10310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10311 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10312 {
10313 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10314 rcStrict = VINF_SUCCESS;
10315 }
10316 return rcStrict;
10317 }
10318
10319 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10320 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10321
10322 VMXVEXITEVENTINFO ExitEventInfo;
10323 RT_ZERO(ExitEventInfo);
10324 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10325 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10326
10327 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10328 {
10329 VMXVEXITINFO ExitInfo;
10330 RT_ZERO(ExitInfo);
10331 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10332 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10333 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10334 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10335 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10336 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10337 }
10338
10339 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10340 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10341 }
10342
10343 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10344}
10345
10346
10347/**
10348 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10349 * Conditional VM-exit.
10350 */
10351HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10352{
10353 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10354 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10355
10356 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10357 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10358 {
10359 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10360 AssertRCReturn(rc, rc);
10361
10362 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10363
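        /* Present the EPT misconfig to PGM as a reserved-bit page fault (X86_TRAP_PF_RSVD)
           on the nested-guest physical address; no linear address is used here. */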
10364 PGMPTWALK Walk;
10365 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10366 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10367 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10368 GCPhysNested, false /* fIsLinearAddrValid */,
10369 0 /* GCPtrNested*/, &Walk);
10370 if (RT_SUCCESS(rcStrict))
10371 return VINF_EM_RAW_EMULATE_INSTR;
10372
10373 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10374 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10375
10376 VMXVEXITEVENTINFO ExitEventInfo;
10377 RT_ZERO(ExitEventInfo);
10378 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10379 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10380
10381 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10382 }
10383
10384 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10385}
10386# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10387
10388/** @} */
10389#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10390
10391
10392/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10393 * probes.
10394 *
10395 * The following few functions and the associated structure contain the bloat
10396 * necessary for providing detailed debug events and dtrace probes as well as
10397 * reliable host-side single stepping.
10398 * "subclassing" the normal execution loop and workers. We replace the loop
10399 * method completely and override selected helpers to add necessary adjustments
10400 * to their core operation.
10401 *
10402 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10403 * any performance for debug and analysis features.
10404 *
10405 * @{
10406 */
10407
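/*
 * A rough, illustrative sketch of how the pieces below are meant to compose. This is
 * not the actual debug run loop (which lives elsewhere in this file and is more
 * involved); only the loop shape and the elided guest-run step are assumptions here,
 * the functions and the VMXRUNDBGSTATE structure are the ones defined below.
 *
 * @code
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *
 *     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState); // commit to the VMCS
 *         // ... run the guest and take the VM-exit (elided) ...
 *         if (ASMBitTest(DbgState.bmExitsToCheck, pVmxTransient->uExitReason))
 *             rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, pVmxTransient->uExitReason);
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 * @endcode
 */
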
10408/**
10409 * Transient per-VCPU debug state of the VMCS and related info that we
10410 * save/restore across the debug run loop.
10411 */
10412typedef struct VMXRUNDBGSTATE
10413{
10414 /** The RIP we started executing at. This is for detecting that we stepped. */
10415 uint64_t uRipStart;
10416 /** The CS we started executing with. */
10417 uint16_t uCsStart;
10418
10419 /** Whether we've actually modified the 1st execution control field. */
10420 bool fModifiedProcCtls : 1;
10421 /** Whether we've actually modified the 2nd execution control field. */
10422 bool fModifiedProcCtls2 : 1;
10423 /** Whether we've actually modified the exception bitmap. */
10424 bool fModifiedXcptBitmap : 1;
10425
10426 /** We desire the CR0 mask to be cleared. */
10427 bool fClearCr0Mask : 1;
10428 /** We desire the CR4 mask to be cleared. */
10429 bool fClearCr4Mask : 1;
10430 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10431 uint32_t fCpe1Extra;
10432 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10433 uint32_t fCpe1Unwanted;
10434 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10435 uint32_t fCpe2Extra;
10436 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10437 uint32_t bmXcptExtra;
10438 /** The sequence number of the Dtrace provider settings that this state was
10439 * configured against. */
10440 uint32_t uDtraceSettingsSeqNo;
10441 /** VM-exits to check (one bit per VM-exit). */
10442 uint32_t bmExitsToCheck[3];
10443
10444 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10445 uint32_t fProcCtlsInitial;
10446 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10447 uint32_t fProcCtls2Initial;
10448 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10449 uint32_t bmXcptInitial;
10450} VMXRUNDBGSTATE;
10451AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10452typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10453
10454
10455/**
10456 * Initializes the VMXRUNDBGSTATE structure.
10457 *
10458 * @param pVCpu The cross context virtual CPU structure of the
10459 * calling EMT.
10460 * @param pVmxTransient The VMX-transient structure.
10461 * @param pDbgState The debug state to initialize.
10462 */
10463static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10464{
10465 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10466 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10467
10468 pDbgState->fModifiedProcCtls = false;
10469 pDbgState->fModifiedProcCtls2 = false;
10470 pDbgState->fModifiedXcptBitmap = false;
10471 pDbgState->fClearCr0Mask = false;
10472 pDbgState->fClearCr4Mask = false;
10473 pDbgState->fCpe1Extra = 0;
10474 pDbgState->fCpe1Unwanted = 0;
10475 pDbgState->fCpe2Extra = 0;
10476 pDbgState->bmXcptExtra = 0;
10477 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10478 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10479 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10480}
10481
10482
10483/**
10484 * Updates the VMCS fields with changes requested by @a pDbgState.
10485 *
10486 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10487 * immediately before executing guest code, i.e. when interrupts are disabled.
10488 * We don't check status codes here as we cannot easily assert or return in the
10489 * latter case.
10490 *
10491 * @param pVCpu The cross context virtual CPU structure.
10492 * @param pVmxTransient The VMX-transient structure.
10493 * @param pDbgState The debug state.
10494 */
10495static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10496{
10497 /*
10498 * Ensure desired flags in VMCS control fields are set.
10499 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10500 *
10501 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10502 * there should be no stale data in pCtx at this point.
10503 */
10504 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10505 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10506 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10507 {
10508 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10509 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10510 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10511 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10512 pDbgState->fModifiedProcCtls = true;
10513 }
10514
10515 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10516 {
10517 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10518 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10519 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10520 pDbgState->fModifiedProcCtls2 = true;
10521 }
10522
10523 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10524 {
10525 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10526 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10527 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10528 pDbgState->fModifiedXcptBitmap = true;
10529 }
10530
10531 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10532 {
10533 pVmcsInfo->u64Cr0Mask = 0;
10534 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10535 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10536 }
10537
10538 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10539 {
10540 pVmcsInfo->u64Cr4Mask = 0;
10541 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10542 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10543 }
10544
10545 NOREF(pVCpu);
10546}
10547
10548
10549/**
10550 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10551 * re-entry the next time around.
10552 *
10553 * @returns Strict VBox status code (i.e. informational status codes too).
10554 * @param pVCpu The cross context virtual CPU structure.
10555 * @param pVmxTransient The VMX-transient structure.
10556 * @param pDbgState The debug state.
10557 * @param rcStrict The return code from executing the guest using single
10558 * stepping.
10559 */
10560static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10561 VBOXSTRICTRC rcStrict)
10562{
10563 /*
10564 * Restore VM-exit control settings as we may not reenter this function the
10565 * next time around.
10566 */
10567 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10568
10569 /* We reload the initial value and trigger what recalculations we can the
10570 next time around. From the looks of things, that's all that's required atm. */
10571 if (pDbgState->fModifiedProcCtls)
10572 {
10573 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10574 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10575 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10576 AssertRC(rc2);
10577 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10578 }
10579
10580 /* We're currently the only ones messing with this one, so just restore the
10581 cached value and reload the field. */
10582 if ( pDbgState->fModifiedProcCtls2
10583 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10584 {
10585 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10586 AssertRC(rc2);
10587 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10588 }
10589
10590 /* If we've modified the exception bitmap, we restore it and trigger
10591 reloading and partial recalculation the next time around. */
10592 if (pDbgState->fModifiedXcptBitmap)
10593 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10594
10595 return rcStrict;
10596}
10597
10598
10599/**
10600 * Configures VM-exit controls for current DBGF and DTrace settings.
10601 *
10602 * This updates @a pDbgState and the VMCS execution control fields to reflect
10603 * the necessary VM-exits demanded by DBGF and DTrace.
10604 *
10605 * @param pVCpu The cross context virtual CPU structure.
10606 * @param pVmxTransient The VMX-transient structure. May update
10607 * fUpdatedTscOffsettingAndPreemptTimer.
10608 * @param pDbgState The debug state.
10609 */
10610static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10611{
10612#ifndef IN_NEM_DARWIN
10613 /*
10614 * Take down the dtrace settings sequence number so we can spot changes.
10615 */
10616 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10617 ASMCompilerBarrier();
10618#endif
10619
10620 /*
10621 * We'll rebuild most of the middle block of data members (holding the
10622 * current settings) as we go along here, so start by clearing it all.
10623 */
10624 pDbgState->bmXcptExtra = 0;
10625 pDbgState->fCpe1Extra = 0;
10626 pDbgState->fCpe1Unwanted = 0;
10627 pDbgState->fCpe2Extra = 0;
10628 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10629 pDbgState->bmExitsToCheck[i] = 0;
10630
10631 /*
10632 * Software interrupts (INT XXh) - no idea how to trigger these...
10633 */
10634 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10635 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10636 || VBOXVMM_INT_SOFTWARE_ENABLED())
10637 {
10638 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10639 }
10640
10641 /*
10642 * INT3 breakpoints - triggered by #BP exceptions.
10643 */
10644 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10645 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10646
10647 /*
10648 * Exception bitmap and XCPT events+probes.
10649 */
10650 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10651 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10652 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10653
10654 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10655 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10656 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10657 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10658 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10659 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10660 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10661 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10662 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10663 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10664 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10665 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10666 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10667 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10668 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10669 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10670 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10671 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10672
10673 if (pDbgState->bmXcptExtra)
10674 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10675
10676 /*
10677 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10678 *
10679 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10680 * So, when adding/changing/removing please don't forget to update it.
10681 *
10682 * Some of the macros are picking up local variables to save horizontal space
10683 * (being able to see it in a table is the lesser evil here).
10684 */
10685#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10686 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10687 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10688#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10689 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10690 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10691 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10692 } else do { } while (0)
10693#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10694 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10695 { \
10696 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10697 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10698 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10699 } else do { } while (0)
10700#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10701 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10702 { \
10703 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10704 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10705 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10706 } else do { } while (0)
10707#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10708 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10709 { \
10710 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10711 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10712 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10713 } else do { } while (0)
10714
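    /* For example, SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
       marks VMX_EXIT_HLT for checking and requests the HLT-exiting control whenever either the
       DBGFEVENT_INSTR_HALT event or the VBOXVMM_INSTR_HALT dtrace probe is enabled. */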
10715 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10716 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10717 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10718 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10719 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10720
10721 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10722 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10723 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10724 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10725 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10726 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10727 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10728 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10729 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10731 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10732 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10733 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10734 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10735 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10736 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10737 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10738 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10739 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10740 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10741 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10742 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10743 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10744 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10745 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10746 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10747 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10748 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10749 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10750 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10751 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10752 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10753 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10754 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10755 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10756 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10757
10758 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10759 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10760 {
10761 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10762 | CPUMCTX_EXTRN_APIC_TPR);
10763 AssertRC(rc);
10764
10765#if 0 /** @todo fix me */
10766 pDbgState->fClearCr0Mask = true;
10767 pDbgState->fClearCr4Mask = true;
10768#endif
10769 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10770 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10771 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10772 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10773 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10774 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10775 require clearing here and in the loop if we start using it. */
10776 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10777 }
10778 else
10779 {
10780 if (pDbgState->fClearCr0Mask)
10781 {
10782 pDbgState->fClearCr0Mask = false;
10783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10784 }
10785 if (pDbgState->fClearCr4Mask)
10786 {
10787 pDbgState->fClearCr4Mask = false;
10788 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10789 }
10790 }
10791 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10792 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10793
10794 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10795 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10796 {
10797 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10798 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10799 }
10800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10801 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10802
10803 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10805 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10806 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10807 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10808 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10809 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10810 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10811#if 0 /** @todo too slow, fix handler. */
10812 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10813#endif
10814 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10815
10816 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10817 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10818 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10819 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10820 {
10821 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10822 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10823 }
10824 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10825 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10826 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10827 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10828
10829 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10830 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10831 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10832 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10833 {
10834 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10835 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10836 }
10837 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10838 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10839 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10840 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10841
10842 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10843 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10844 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10845 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10846 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10847 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10848 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10849 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10850 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10851 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10852 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10853 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10854 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10855 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10856 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10857 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10858 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10859 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10860 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10861 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10862 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10863 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10864
10865#undef IS_EITHER_ENABLED
10866#undef SET_ONLY_XBM_IF_EITHER_EN
10867#undef SET_CPE1_XBM_IF_EITHER_EN
10868#undef SET_CPEU_XBM_IF_EITHER_EN
10869#undef SET_CPE2_XBM_IF_EITHER_EN
10870
10871 /*
10872 * Sanitize the control stuff: keep only settable bits (allowed1) and never try to clear must-be-one bits (allowed0).
10873 */
10874 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10875 if (pDbgState->fCpe2Extra)
10876 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10877 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10878 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10879#ifndef IN_NEM_DARWIN
10880 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10881 {
10882 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10883 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10884 }
10885#else
10886 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10887 {
10888 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10889 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10890 }
10891#endif
10892
10893 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10894 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10895 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10896 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10897}
10898
10899
10900/**
10901 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10902 * appropriate.
10903 *
10904 * The caller has checked the VM-exit against the
10905 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
10906 * so we don't have to do either of those here.
10907 *
10908 * @returns Strict VBox status code (i.e. informational status codes too).
10909 * @param pVCpu The cross context virtual CPU structure.
10910 * @param pVmxTransient The VMX-transient structure.
10911 * @param uExitReason The VM-exit reason.
10912 *
10913 * @remarks The name of this function is displayed by dtrace, so keep it short
10914 * and to the point. No longer than 33 chars, please.
10915 */
10916static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10917{
10918 /*
10919 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10920 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10921 *
10922 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10923 * does. Must add/change/remove in both places. Same ordering, please.
10924 *
10925 * Added/removed events must also be reflected in the next section
10926 * where we dispatch dtrace events.
10927 */
10928 bool fDtrace1 = false;
10929 bool fDtrace2 = false;
10930 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10931 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10932 uint32_t uEventArg = 0;
10933#define SET_EXIT(a_EventSubName) \
10934 do { \
10935 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10936 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10937 } while (0)
10938#define SET_BOTH(a_EventSubName) \
10939 do { \
10940 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10941 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10942 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10943 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10944 } while (0)
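    /* E.g. SET_BOTH(CPUID) selects DBGFEVENT_INSTR_CPUID + DBGFEVENT_EXIT_CPUID and checks
       whether the corresponding VBOXVMM_INSTR_CPUID / VBOXVMM_EXIT_CPUID probes are enabled. */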
10945 switch (uExitReason)
10946 {
10947 case VMX_EXIT_MTF:
10948 return vmxHCExitMtf(pVCpu, pVmxTransient);
10949
10950 case VMX_EXIT_XCPT_OR_NMI:
10951 {
10952 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10953 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10954 {
10955 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10956 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10957 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10958 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10959 {
10960 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10961 {
10962 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10963 uEventArg = pVmxTransient->uExitIntErrorCode;
10964 }
10965 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10966 switch (enmEvent1)
10967 {
10968 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10969 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10970 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10971 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10972 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10973 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10974 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10975 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10976 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10977 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10978 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10979 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10980 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10981 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10982 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10983 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10984 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10985 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10986 default: break;
10987 }
10988 }
10989 else
10990 AssertFailed();
10991 break;
10992
10993 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10994 uEventArg = idxVector;
10995 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10996 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10997 break;
10998 }
10999 break;
11000 }
11001
11002 case VMX_EXIT_TRIPLE_FAULT:
11003 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11004 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11005 break;
11006 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11007 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11008 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11009 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11010 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11011
11012 /* Instruction specific VM-exits: */
11013 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11014 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11015 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11016 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11017 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11018 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11019 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11020 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11021 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11022 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11023 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11024 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11025 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11026 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11027 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11028 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11029 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11030 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11031 case VMX_EXIT_MOV_CRX:
11032 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11033 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11034 SET_BOTH(CRX_READ);
11035 else
11036 SET_BOTH(CRX_WRITE);
11037 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11038 break;
11039 case VMX_EXIT_MOV_DRX:
11040 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11041 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11042 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11043 SET_BOTH(DRX_READ);
11044 else
11045 SET_BOTH(DRX_WRITE);
11046 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11047 break;
11048 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11049 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11050 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11051 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11052 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11053 case VMX_EXIT_GDTR_IDTR_ACCESS:
11054 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11055 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11056 {
11057 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11058 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11059 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11060 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11061 }
11062 break;
11063
11064 case VMX_EXIT_LDTR_TR_ACCESS:
11065 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11066 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11067 {
11068 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11069 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11070 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11071 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11072 }
11073 break;
11074
11075 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11076 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11077 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11078 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11079 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11080 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11081 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11082 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11083 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11084 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11085 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11086
11087 /* Events that aren't relevant at this point. */
11088 case VMX_EXIT_EXT_INT:
11089 case VMX_EXIT_INT_WINDOW:
11090 case VMX_EXIT_NMI_WINDOW:
11091 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11092 case VMX_EXIT_PREEMPT_TIMER:
11093 case VMX_EXIT_IO_INSTR:
11094 break;
11095
11096 /* Errors and unexpected events. */
11097 case VMX_EXIT_INIT_SIGNAL:
11098 case VMX_EXIT_SIPI:
11099 case VMX_EXIT_IO_SMI:
11100 case VMX_EXIT_SMI:
11101 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11102 case VMX_EXIT_ERR_MSR_LOAD:
11103 case VMX_EXIT_ERR_MACHINE_CHECK:
11104 case VMX_EXIT_PML_FULL:
11105 case VMX_EXIT_VIRTUALIZED_EOI:
11106 break;
11107
11108 default:
11109 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11110 break;
11111 }
11112#undef SET_BOTH
11113#undef SET_EXIT
11114
11115 /*
11116 * Dtrace tracepoints go first. We do them here at once so we don't
11117 * have to repeat the guest-state saving and related setup a few dozen times.
11118 * The downside is that we've got to repeat the switch, though this time
11119 * we use enmEvent since the probes are a subset of what DBGF does.
11120 */
11121 if (fDtrace1 || fDtrace2)
11122 {
11123 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11124 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11125 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11126 switch (enmEvent1)
11127 {
11128 /** @todo consider which extra parameters would be helpful for each probe. */
11129 case DBGFEVENT_END: break;
11130 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11131 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11132 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11133 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11134 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11135 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11136 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11137 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11138 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11139 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11140 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11141 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11142 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11143 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11144 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11145 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11146 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11147 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11148 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11149 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11150 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11151 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11152 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11153 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11154 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11155 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11156 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11157 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11158 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11159 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11160 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11161 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11162 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11163 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11164 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11165 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11166 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11167 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11168 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11169 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11170 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11171 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11172 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11173 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11174 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11175 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11176 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11177 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11178 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11179 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11180 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11181 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11182 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11183 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11184 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11185 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11186 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11187 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11188 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11189 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11190 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11191 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11192 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11193 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11194 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11195 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11196 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11197 }
11198 switch (enmEvent2)
11199 {
11200 /** @todo consider which extra parameters would be helpful for each probe. */
11201 case DBGFEVENT_END: break;
11202 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11203 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11204 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11205 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11206 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11207 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11208 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11209 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11210 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11211 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11212 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11213 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11214 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11215 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11216 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11217 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11218 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11219 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11220 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11221 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11222 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11223 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11224 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11225 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11226 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11227 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11228 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11229 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11230 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11231 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11232 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11233 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11234 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11235 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11236 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11237 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11238 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11239 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11240 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11241 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11242 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11243 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11244 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11245 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11246 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11247 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11248 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11249 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11250 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11251 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11252 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11253 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11254 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11255 }
11256 }
11257
11258 /*
11259 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11260 * the DBGF call will do a full check).
11261 *
11262 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11263 * Note! If we have two events, we prioritize the first, i.e. the instruction
11264 * one, in order to avoid event nesting.
11265 */
11266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11267 if ( enmEvent1 != DBGFEVENT_END
11268 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11269 {
11270 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11271 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11272 if (rcStrict != VINF_SUCCESS)
11273 return rcStrict;
11274 }
11275 else if ( enmEvent2 != DBGFEVENT_END
11276 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11277 {
11278 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11279 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11280 if (rcStrict != VINF_SUCCESS)
11281 return rcStrict;
11282 }
11283
11284 return VINF_SUCCESS;
11285}
11286
11287
11288/**
11289 * Single-stepping VM-exit filtering.
11290 *
11291 * This preprocesses the VM-exits and decides whether we've gotten far
11292 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11293 * handling is performed.
11294 *
11295 * @returns Strict VBox status code (i.e. informational status codes too).
11296 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11297 * @param pVmxTransient The VMX-transient structure.
11298 * @param pDbgState The debug state.
11299 */
11300DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11301{
11302 /*
11303 * Expensive (imports the full guest context) generic dtrace VM-exit probe.
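 * The full guest-context import below is only paid when the VBOXVMM_R0_HMVMX_VMEXIT
 * probe is actually attached, which keeps the common (probe-disabled) path cheap.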
11304 */
11305 uint32_t const uExitReason = pVmxTransient->uExitReason;
11306 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11307 { /* more likely */ }
11308 else
11309 {
11310 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11311 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11312 AssertRC(rc);
11313 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11314 }
11315
11316#ifndef IN_NEM_DARWIN
11317 /*
11318 * Check for host NMI, just to get that out of the way.
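 * (A host NMI taken while the guest runs surfaces as a VMX_EXIT_XCPT_OR_NMI exit with
 * an NMI interruption type and is handed to the host via hmR0VmxExitHostNmi below
 * rather than being reflected to the guest.)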
11319 */
11320 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11321 { /* normally likely */ }
11322 else
11323 {
11324 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11325 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11326 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11327 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11328 }
11329#endif
11330
11331 /*
11332 * Check for single stepping event if we're stepping.
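 * Only exits that indicate forward progress (MTF, the event exits and the
 * instruction-specific exits below) can complete a single step here; error and
 * unexpected exits simply fall through to the normal checks further down.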
11333 */
11334 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11335 {
11336 switch (uExitReason)
11337 {
11338 case VMX_EXIT_MTF:
11339 return vmxHCExitMtf(pVCpu, pVmxTransient);
11340
11341 /* Various events: */
11342 case VMX_EXIT_XCPT_OR_NMI:
11343 case VMX_EXIT_EXT_INT:
11344 case VMX_EXIT_TRIPLE_FAULT:
11345 case VMX_EXIT_INT_WINDOW:
11346 case VMX_EXIT_NMI_WINDOW:
11347 case VMX_EXIT_TASK_SWITCH:
11348 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11349 case VMX_EXIT_APIC_ACCESS:
11350 case VMX_EXIT_EPT_VIOLATION:
11351 case VMX_EXIT_EPT_MISCONFIG:
11352 case VMX_EXIT_PREEMPT_TIMER:
11353
11354 /* Instruction specific VM-exits: */
11355 case VMX_EXIT_CPUID:
11356 case VMX_EXIT_GETSEC:
11357 case VMX_EXIT_HLT:
11358 case VMX_EXIT_INVD:
11359 case VMX_EXIT_INVLPG:
11360 case VMX_EXIT_RDPMC:
11361 case VMX_EXIT_RDTSC:
11362 case VMX_EXIT_RSM:
11363 case VMX_EXIT_VMCALL:
11364 case VMX_EXIT_VMCLEAR:
11365 case VMX_EXIT_VMLAUNCH:
11366 case VMX_EXIT_VMPTRLD:
11367 case VMX_EXIT_VMPTRST:
11368 case VMX_EXIT_VMREAD:
11369 case VMX_EXIT_VMRESUME:
11370 case VMX_EXIT_VMWRITE:
11371 case VMX_EXIT_VMXOFF:
11372 case VMX_EXIT_VMXON:
11373 case VMX_EXIT_MOV_CRX:
11374 case VMX_EXIT_MOV_DRX:
11375 case VMX_EXIT_IO_INSTR:
11376 case VMX_EXIT_RDMSR:
11377 case VMX_EXIT_WRMSR:
11378 case VMX_EXIT_MWAIT:
11379 case VMX_EXIT_MONITOR:
11380 case VMX_EXIT_PAUSE:
11381 case VMX_EXIT_GDTR_IDTR_ACCESS:
11382 case VMX_EXIT_LDTR_TR_ACCESS:
11383 case VMX_EXIT_INVEPT:
11384 case VMX_EXIT_RDTSCP:
11385 case VMX_EXIT_INVVPID:
11386 case VMX_EXIT_WBINVD:
11387 case VMX_EXIT_XSETBV:
11388 case VMX_EXIT_RDRAND:
11389 case VMX_EXIT_INVPCID:
11390 case VMX_EXIT_VMFUNC:
11391 case VMX_EXIT_RDSEED:
11392 case VMX_EXIT_XSAVES:
11393 case VMX_EXIT_XRSTORS:
11394 {
11395 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11396 AssertRCReturn(rc, rc);
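 /* The step is considered complete once RIP or CS differs from the values captured
    when stepping started (pDbgState->uRipStart / uCsStart); otherwise fall through
    to the breakpoint/probe checks and normal exit handling below. */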
11397 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11398 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11399 return VINF_EM_DBG_STEPPED;
11400 break;
11401 }
11402
11403 /* Errors and unexpected events: */
11404 case VMX_EXIT_INIT_SIGNAL:
11405 case VMX_EXIT_SIPI:
11406 case VMX_EXIT_IO_SMI:
11407 case VMX_EXIT_SMI:
11408 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11409 case VMX_EXIT_ERR_MSR_LOAD:
11410 case VMX_EXIT_ERR_MACHINE_CHECK:
11411 case VMX_EXIT_PML_FULL:
11412 case VMX_EXIT_VIRTUALIZED_EOI:
11413 case VMX_EXIT_APIC_WRITE: /* Reportedly fault-like, so presumably it must still be processed. */
11414 break;
11415
11416 default:
11417 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11418 break;
11419 }
11420 }
11421
11422 /*
11423 * Check for debugger event breakpoints and dtrace probes.
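 * bmExitsToCheck is a bitmap indexed by VM-exit reason; the bounds check below guards
 * against reasons outside the bitmap before testing the bit with ASMBitTest.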
11424 */
11425 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11426 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11427 {
11428 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11429 if (rcStrict != VINF_SUCCESS)
11430 return rcStrict;
11431 }
11432
11433 /*
11434 * Normal processing.
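 * When HMVMX_USE_FUNCTION_TABLE is defined, the exit is dispatched through
 * g_aVMExitHandlers indexed by the exit reason; otherwise the switch-based
 * vmxHCHandleExit is used.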
11435 */
11436#ifdef HMVMX_USE_FUNCTION_TABLE
11437 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11438#else
11439 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11440#endif
11441}
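
/*
 * A minimal, purely illustrative sketch of how a debug-mode run loop is assumed to
 * consume vmxHCRunDebugHandleExit() above: run the guest, filter each VM-exit, and
 * stop as soon as the filter reports anything other than VINF_SUCCESS (for example
 * VINF_EM_DBG_STEPPED once RIP/CS has moved past the starting point recorded in the
 * debug state).  The helper name and the elided guest-run step are hypothetical; the
 * real loop lives elsewhere in this template and does considerably more work.
 */
#if 0 /* illustrative sketch only, not built */
static VBOXSTRICTRC vmxHCSketchDebugRunLoop(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient,
                                            PVMXRUNDBGSTATE pDbgState, uint32_t cMaxExits)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t cExits = 0; cExits < cMaxExits; cExits++)
    {
        /* ... execute the guest here and fill pVmxTransient->uExitReason ... */

        /* Filter the VM-exit; VINF_SUCCESS means "keep going". */
        rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, pDbgState);
        if (rcStrict != VINF_SUCCESS)
            break;
    }
    return rcStrict;
}
#endif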
11442
11443/** @} */