source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@96974

Last change on this file: r96974, checked in by vboxsync, 3 years ago

VMM/VMXAllTemplate: Nested VMX: bugref:10092 Fix assertion when VMLAUNCH/VMRESUME fails with an invalid guest state.

1/* $Id: VMXAllTemplate.cpp.h 96974 2022-10-04 05:32:09Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
39#endif
40
41
42/** Use the function table. */
43#define HMVMX_USE_FUNCTION_TABLE
44
45/** Determine which tagged-TLB flush handler to use. */
46#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
47#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
48#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
49#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
50
51/** Assert that all the given fields have been read from the VMCS. */
52#ifdef VBOX_STRICT
53# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
54 do { \
55 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
56 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
57 } while (0)
58#else
59# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
60#endif
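/* Illustrative usage (hedged sketch, not taken verbatim from elsewhere in the file):
 * a VM-exit handler that depends on the exit qualification having been read would
 * typically assert it before use:
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *     uint64_t const uExitQual = pVmxTransient->uExitQual;
 */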
61
62/**
63 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
64 * guest using hardware-assisted VMX.
65 *
66 * This excludes state like GPRs (other than RSP), which are always swapped
67 * and restored across the world-switch, and also MSRs like EFER which cannot
68 * be modified by the guest without causing a VM-exit.
69 */
70#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
71 | CPUMCTX_EXTRN_RFLAGS \
72 | CPUMCTX_EXTRN_RSP \
73 | CPUMCTX_EXTRN_SREG_MASK \
74 | CPUMCTX_EXTRN_TABLE_MASK \
75 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
76 | CPUMCTX_EXTRN_SYSCALL_MSRS \
77 | CPUMCTX_EXTRN_SYSENTER_MSRS \
78 | CPUMCTX_EXTRN_TSC_AUX \
79 | CPUMCTX_EXTRN_OTHER_MSRS \
80 | CPUMCTX_EXTRN_CR0 \
81 | CPUMCTX_EXTRN_CR3 \
82 | CPUMCTX_EXTRN_CR4 \
83 | CPUMCTX_EXTRN_DR7 \
84 | CPUMCTX_EXTRN_HWVIRT \
85 | CPUMCTX_EXTRN_INHIBIT_INT \
86 | CPUMCTX_EXTRN_INHIBIT_NMI)
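/* Illustrative usage (hedged sketch): handlers that need the complete guest-CPU
 * context usually import it with the mask above via the helper forward-declared
 * further down, e.g.:
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
 *     AssertRCReturn(rc, rc);
 * The exact error handling differs per call site. */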
87
88/**
89 * Exception bitmap mask for real-mode guests (real-on-v86).
90 *
91 * We need to intercept all exceptions manually, except:
92 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
93 * deadlocking due to bugs in Intel CPUs.
94 * - \#PF, which need not be intercepted even in real-mode when nested paging
95 * support is available.
96 */
97#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
98 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
99 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
100 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
101 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
102 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
103 | RT_BIT(X86_XCPT_XF))
104
105/** Maximum VM-instruction error number. */
106#define HMVMX_INSTR_ERROR_MAX 28
107
108/** Profiling macro. */
109#ifdef HM_PROFILE_EXIT_DISPATCH
110# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
111# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
112#else
113# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
114# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
115#endif
116
117#ifndef IN_NEM_DARWIN
118/** Assert that preemption is disabled or covered by thread-context hooks. */
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
120 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
121
122/** Assert that we haven't migrated CPUs when thread-context hooks are not
123 * used. */
124# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
125 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
126 ("Illegal migration! Entered on CPU %u Current %u\n", \
127 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
128#else
129# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
130# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
131#endif
132
133/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
134 * context. */
135#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
136 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
137 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
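/* Illustrative usage (hedged sketch): before reading a register straight out of
 * the guest context, callers typically assert that it has been imported, e.g.:
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
 */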
138
139/** Log the VM-exit reason with an easily visible marker to identify it in a
140 * potential sea of logging data. */
141#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
142 do { \
143 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
144 HMGetVmxExitName(a_uExitReason))); \
145 } while (0)
146
147
148/*********************************************************************************************************************************
149* Structures and Typedefs *
150*********************************************************************************************************************************/
151/**
152 * Memory operand read or write access.
153 */
154typedef enum VMXMEMACCESS
155{
156 VMXMEMACCESS_READ = 0,
157 VMXMEMACCESS_WRITE = 1
158} VMXMEMACCESS;
159
160
161/**
162 * VMX VM-exit handler.
163 *
164 * @returns Strict VBox status code (i.e. informational status codes too).
165 * @param pVCpu The cross context virtual CPU structure.
166 * @param pVmxTransient The VMX-transient structure.
167 */
168#ifndef HMVMX_USE_FUNCTION_TABLE
169typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
170#else
171typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
172/** Pointer to VM-exit handler. */
173typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
174#endif
175
176/**
177 * VMX VM-exit handler, non-strict status code.
178 *
179 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
180 *
181 * @returns VBox status code, no informational status code returned.
182 * @param pVCpu The cross context virtual CPU structure.
183 * @param pVmxTransient The VMX-transient structure.
184 *
185 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
186 * use of that status code will be replaced with VINF_EM_SOMETHING
187 * later when switching over to IEM.
188 */
189#ifndef HMVMX_USE_FUNCTION_TABLE
190typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191#else
192typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
193#endif
194
195
196/*********************************************************************************************************************************
197* Internal Functions *
198*********************************************************************************************************************************/
199#ifndef HMVMX_USE_FUNCTION_TABLE
200DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
201# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
202# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
203#else
204# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
205# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
206#endif
207#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
208DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
209#endif
210
211static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
212
213/** @name VM-exit handler prototypes.
214 * @{
215 */
216static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
217static FNVMXEXITHANDLER vmxHCExitExtInt;
218static FNVMXEXITHANDLER vmxHCExitTripleFault;
219static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
220static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
221static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
222static FNVMXEXITHANDLER vmxHCExitCpuid;
223static FNVMXEXITHANDLER vmxHCExitGetsec;
224static FNVMXEXITHANDLER vmxHCExitHlt;
225static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
226static FNVMXEXITHANDLER vmxHCExitInvlpg;
227static FNVMXEXITHANDLER vmxHCExitRdpmc;
228static FNVMXEXITHANDLER vmxHCExitVmcall;
229#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
230static FNVMXEXITHANDLER vmxHCExitVmclear;
231static FNVMXEXITHANDLER vmxHCExitVmlaunch;
232static FNVMXEXITHANDLER vmxHCExitVmptrld;
233static FNVMXEXITHANDLER vmxHCExitVmptrst;
234static FNVMXEXITHANDLER vmxHCExitVmread;
235static FNVMXEXITHANDLER vmxHCExitVmresume;
236static FNVMXEXITHANDLER vmxHCExitVmwrite;
237static FNVMXEXITHANDLER vmxHCExitVmxoff;
238static FNVMXEXITHANDLER vmxHCExitVmxon;
239static FNVMXEXITHANDLER vmxHCExitInvvpid;
240# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
241static FNVMXEXITHANDLER vmxHCExitInvept;
242# endif
243#endif
244static FNVMXEXITHANDLER vmxHCExitRdtsc;
245static FNVMXEXITHANDLER vmxHCExitMovCRx;
246static FNVMXEXITHANDLER vmxHCExitMovDRx;
247static FNVMXEXITHANDLER vmxHCExitIoInstr;
248static FNVMXEXITHANDLER vmxHCExitRdmsr;
249static FNVMXEXITHANDLER vmxHCExitWrmsr;
250static FNVMXEXITHANDLER vmxHCExitMwait;
251static FNVMXEXITHANDLER vmxHCExitMtf;
252static FNVMXEXITHANDLER vmxHCExitMonitor;
253static FNVMXEXITHANDLER vmxHCExitPause;
254static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
255static FNVMXEXITHANDLER vmxHCExitApicAccess;
256static FNVMXEXITHANDLER vmxHCExitEptViolation;
257static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
258static FNVMXEXITHANDLER vmxHCExitRdtscp;
259static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
260static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
261static FNVMXEXITHANDLER vmxHCExitXsetbv;
262static FNVMXEXITHANDLER vmxHCExitInvpcid;
263#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
264static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
265#endif
266static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
267static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
268/** @} */
269
270#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
271/** @name Nested-guest VM-exit handler prototypes.
272 * @{
273 */
274static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
275static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
276static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
277static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
278static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
279static FNVMXEXITHANDLER vmxHCExitHltNested;
280static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
281static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
282static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
283static FNVMXEXITHANDLER vmxHCExitRdtscNested;
284static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
285static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
286static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
287static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
288static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
289static FNVMXEXITHANDLER vmxHCExitMwaitNested;
290static FNVMXEXITHANDLER vmxHCExitMtfNested;
291static FNVMXEXITHANDLER vmxHCExitMonitorNested;
292static FNVMXEXITHANDLER vmxHCExitPauseNested;
293static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
294static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
295static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
296static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
297static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
298static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
299static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
301static FNVMXEXITHANDLER vmxHCExitInstrNested;
302static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
303# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
304static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
305static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
306# endif
307/** @} */
308#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
309
310
311/*********************************************************************************************************************************
312* Global Variables *
313*********************************************************************************************************************************/
314#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
315/**
316 * Array of all VMCS fields.
317 * Any fields added to the VT-x spec. should be added here.
318 *
319 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
320 * of nested-guests.
321 */
322static const uint32_t g_aVmcsFields[] =
323{
324 /* 16-bit control fields. */
325 VMX_VMCS16_VPID,
326 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
327 VMX_VMCS16_EPTP_INDEX,
328
329 /* 16-bit guest-state fields. */
330 VMX_VMCS16_GUEST_ES_SEL,
331 VMX_VMCS16_GUEST_CS_SEL,
332 VMX_VMCS16_GUEST_SS_SEL,
333 VMX_VMCS16_GUEST_DS_SEL,
334 VMX_VMCS16_GUEST_FS_SEL,
335 VMX_VMCS16_GUEST_GS_SEL,
336 VMX_VMCS16_GUEST_LDTR_SEL,
337 VMX_VMCS16_GUEST_TR_SEL,
338 VMX_VMCS16_GUEST_INTR_STATUS,
339 VMX_VMCS16_GUEST_PML_INDEX,
340
341 /* 16-bit host-state fields. */
342 VMX_VMCS16_HOST_ES_SEL,
343 VMX_VMCS16_HOST_CS_SEL,
344 VMX_VMCS16_HOST_SS_SEL,
345 VMX_VMCS16_HOST_DS_SEL,
346 VMX_VMCS16_HOST_FS_SEL,
347 VMX_VMCS16_HOST_GS_SEL,
348 VMX_VMCS16_HOST_TR_SEL,
349
350 /* 64-bit control fields. */
351 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
352 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
353 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
355 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
356 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
357 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
358 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
361 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
364 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
365 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
366 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
367 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
368 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
369 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
370 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
371 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
372 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
373 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
374 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
375 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
376 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
377 VMX_VMCS64_CTRL_EPTP_FULL,
378 VMX_VMCS64_CTRL_EPTP_HIGH,
379 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
380 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
387 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
388 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
389 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
390 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
391 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
394 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
395 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
396 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
397 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_SPPTP_FULL,
400 VMX_VMCS64_CTRL_SPPTP_HIGH,
401 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
402 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
403 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
404 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
405 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
406 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
407
408 /* 64-bit read-only data fields. */
409 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
410 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
411
412 /* 64-bit guest-state fields. */
413 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
414 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
415 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
416 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
417 VMX_VMCS64_GUEST_PAT_FULL,
418 VMX_VMCS64_GUEST_PAT_HIGH,
419 VMX_VMCS64_GUEST_EFER_FULL,
420 VMX_VMCS64_GUEST_EFER_HIGH,
421 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
422 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
423 VMX_VMCS64_GUEST_PDPTE0_FULL,
424 VMX_VMCS64_GUEST_PDPTE0_HIGH,
425 VMX_VMCS64_GUEST_PDPTE1_FULL,
426 VMX_VMCS64_GUEST_PDPTE1_HIGH,
427 VMX_VMCS64_GUEST_PDPTE2_FULL,
428 VMX_VMCS64_GUEST_PDPTE2_HIGH,
429 VMX_VMCS64_GUEST_PDPTE3_FULL,
430 VMX_VMCS64_GUEST_PDPTE3_HIGH,
431 VMX_VMCS64_GUEST_BNDCFGS_FULL,
432 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
433 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
434 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
435 VMX_VMCS64_GUEST_PKRS_FULL,
436 VMX_VMCS64_GUEST_PKRS_HIGH,
437
438 /* 64-bit host-state fields. */
439 VMX_VMCS64_HOST_PAT_FULL,
440 VMX_VMCS64_HOST_PAT_HIGH,
441 VMX_VMCS64_HOST_EFER_FULL,
442 VMX_VMCS64_HOST_EFER_HIGH,
443 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
444 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
445 VMX_VMCS64_HOST_PKRS_FULL,
446 VMX_VMCS64_HOST_PKRS_HIGH,
447
448 /* 32-bit control fields. */
449 VMX_VMCS32_CTRL_PIN_EXEC,
450 VMX_VMCS32_CTRL_PROC_EXEC,
451 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
452 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
453 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
454 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
455 VMX_VMCS32_CTRL_EXIT,
456 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
457 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
458 VMX_VMCS32_CTRL_ENTRY,
459 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
461 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
462 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
463 VMX_VMCS32_CTRL_TPR_THRESHOLD,
464 VMX_VMCS32_CTRL_PROC_EXEC2,
465 VMX_VMCS32_CTRL_PLE_GAP,
466 VMX_VMCS32_CTRL_PLE_WINDOW,
467
468 /* 32-bit read-only data fields. */
469 VMX_VMCS32_RO_VM_INSTR_ERROR,
470 VMX_VMCS32_RO_EXIT_REASON,
471 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
472 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
473 VMX_VMCS32_RO_IDT_VECTORING_INFO,
474 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
475 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
476 VMX_VMCS32_RO_EXIT_INSTR_INFO,
477
478 /* 32-bit guest-state fields. */
479 VMX_VMCS32_GUEST_ES_LIMIT,
480 VMX_VMCS32_GUEST_CS_LIMIT,
481 VMX_VMCS32_GUEST_SS_LIMIT,
482 VMX_VMCS32_GUEST_DS_LIMIT,
483 VMX_VMCS32_GUEST_FS_LIMIT,
484 VMX_VMCS32_GUEST_GS_LIMIT,
485 VMX_VMCS32_GUEST_LDTR_LIMIT,
486 VMX_VMCS32_GUEST_TR_LIMIT,
487 VMX_VMCS32_GUEST_GDTR_LIMIT,
488 VMX_VMCS32_GUEST_IDTR_LIMIT,
489 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
490 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
491 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_INT_STATE,
498 VMX_VMCS32_GUEST_ACTIVITY_STATE,
499 VMX_VMCS32_GUEST_SMBASE,
500 VMX_VMCS32_GUEST_SYSENTER_CS,
501 VMX_VMCS32_PREEMPT_TIMER_VALUE,
502
503 /* 32-bit host-state fields. */
504 VMX_VMCS32_HOST_SYSENTER_CS,
505
506 /* Natural-width control fields. */
507 VMX_VMCS_CTRL_CR0_MASK,
508 VMX_VMCS_CTRL_CR4_MASK,
509 VMX_VMCS_CTRL_CR0_READ_SHADOW,
510 VMX_VMCS_CTRL_CR4_READ_SHADOW,
511 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
512 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
515
516 /* Natural-width read-only data fields. */
517 VMX_VMCS_RO_EXIT_QUALIFICATION,
518 VMX_VMCS_RO_IO_RCX,
519 VMX_VMCS_RO_IO_RSI,
520 VMX_VMCS_RO_IO_RDI,
521 VMX_VMCS_RO_IO_RIP,
522 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
523
524 /* Natural-width guest-state fields. */
525 VMX_VMCS_GUEST_CR0,
526 VMX_VMCS_GUEST_CR3,
527 VMX_VMCS_GUEST_CR4,
528 VMX_VMCS_GUEST_ES_BASE,
529 VMX_VMCS_GUEST_CS_BASE,
530 VMX_VMCS_GUEST_SS_BASE,
531 VMX_VMCS_GUEST_DS_BASE,
532 VMX_VMCS_GUEST_FS_BASE,
533 VMX_VMCS_GUEST_GS_BASE,
534 VMX_VMCS_GUEST_LDTR_BASE,
535 VMX_VMCS_GUEST_TR_BASE,
536 VMX_VMCS_GUEST_GDTR_BASE,
537 VMX_VMCS_GUEST_IDTR_BASE,
538 VMX_VMCS_GUEST_DR7,
539 VMX_VMCS_GUEST_RSP,
540 VMX_VMCS_GUEST_RIP,
541 VMX_VMCS_GUEST_RFLAGS,
542 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
543 VMX_VMCS_GUEST_SYSENTER_ESP,
544 VMX_VMCS_GUEST_SYSENTER_EIP,
545 VMX_VMCS_GUEST_S_CET,
546 VMX_VMCS_GUEST_SSP,
547 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
548
549 /* Natural-width host-state fields. */
550 VMX_VMCS_HOST_CR0,
551 VMX_VMCS_HOST_CR3,
552 VMX_VMCS_HOST_CR4,
553 VMX_VMCS_HOST_FS_BASE,
554 VMX_VMCS_HOST_GS_BASE,
555 VMX_VMCS_HOST_TR_BASE,
556 VMX_VMCS_HOST_GDTR_BASE,
557 VMX_VMCS_HOST_IDTR_BASE,
558 VMX_VMCS_HOST_SYSENTER_ESP,
559 VMX_VMCS_HOST_SYSENTER_EIP,
560 VMX_VMCS_HOST_RSP,
561 VMX_VMCS_HOST_RIP,
562 VMX_VMCS_HOST_S_CET,
563 VMX_VMCS_HOST_SSP,
564 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
565};
566#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
567
568#ifdef VBOX_STRICT
569static const uint32_t g_aVmcsSegBase[] =
570{
571 VMX_VMCS_GUEST_ES_BASE,
572 VMX_VMCS_GUEST_CS_BASE,
573 VMX_VMCS_GUEST_SS_BASE,
574 VMX_VMCS_GUEST_DS_BASE,
575 VMX_VMCS_GUEST_FS_BASE,
576 VMX_VMCS_GUEST_GS_BASE
577};
578static const uint32_t g_aVmcsSegSel[] =
579{
580 VMX_VMCS16_GUEST_ES_SEL,
581 VMX_VMCS16_GUEST_CS_SEL,
582 VMX_VMCS16_GUEST_SS_SEL,
583 VMX_VMCS16_GUEST_DS_SEL,
584 VMX_VMCS16_GUEST_FS_SEL,
585 VMX_VMCS16_GUEST_GS_SEL
586};
587static const uint32_t g_aVmcsSegLimit[] =
588{
589 VMX_VMCS32_GUEST_ES_LIMIT,
590 VMX_VMCS32_GUEST_CS_LIMIT,
591 VMX_VMCS32_GUEST_SS_LIMIT,
592 VMX_VMCS32_GUEST_DS_LIMIT,
593 VMX_VMCS32_GUEST_FS_LIMIT,
594 VMX_VMCS32_GUEST_GS_LIMIT
595};
596static const uint32_t g_aVmcsSegAttr[] =
597{
598 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
599 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
600 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
601 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
604};
605AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
606AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
607AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
608AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
609#endif /* VBOX_STRICT */
610
611#ifdef HMVMX_USE_FUNCTION_TABLE
612/**
613 * VMX_EXIT dispatch table.
614 */
615static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
616{
617 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
618 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
619 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
620 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
621 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
622 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
623 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
624 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
625 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
626 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
627 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
628 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
629 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
630 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
631 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
632 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
633 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
634 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
635 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
637 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
638 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
639 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
640 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
641 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
642 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
643 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
644 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
645 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
646#else
647 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
648 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
649 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
650 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
651 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
652 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
653 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
654 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
655 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
658 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
659 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
660 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
661 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
662 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
663 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
664 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
665 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
666 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
667 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
668 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
669 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
670 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
671 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
672 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
673 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
674 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
675 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
676 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
677 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
678 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
679#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
680 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
681#else
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
683#endif
684 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
685 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
686#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
687 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
688#else
689 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
690#endif
691 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
692 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
693 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
694 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
695 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
696 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
697 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
698 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
699 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
700 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
701 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
702 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
703 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
704 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
705 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
706 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
707};
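/* Illustrative dispatch (hedged sketch): with HMVMX_USE_FUNCTION_TABLE defined,
 * the common VM-exit path looks the handler up roughly as
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     return g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 * The actual dispatcher lives further down in this file and is not shown here. */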
708#endif /* HMVMX_USE_FUNCTION_TABLE */
709
710#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
711static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
712{
713 /* 0 */ "(Not Used)",
714 /* 1 */ "VMCALL executed in VMX root operation.",
715 /* 2 */ "VMCLEAR with invalid physical address.",
716 /* 3 */ "VMCLEAR with VMXON pointer.",
717 /* 4 */ "VMLAUNCH with non-clear VMCS.",
718 /* 5 */ "VMRESUME with non-launched VMCS.",
719 /* 6 */ "VMRESUME after VMXOFF",
720 /* 7 */ "VM-entry with invalid control fields.",
721 /* 8 */ "VM-entry with invalid host state fields.",
722 /* 9 */ "VMPTRLD with invalid physical address.",
723 /* 10 */ "VMPTRLD with VMXON pointer.",
724 /* 11 */ "VMPTRLD with incorrect revision identifier.",
725 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
726 /* 13 */ "VMWRITE to read-only VMCS component.",
727 /* 14 */ "(Not Used)",
728 /* 15 */ "VMXON executed in VMX root operation.",
729 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
730 /* 17 */ "VM-entry with non-launched executing VMCS.",
731 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
732 /* 19 */ "VMCALL with non-clear VMCS.",
733 /* 20 */ "VMCALL with invalid VM-exit control fields.",
734 /* 21 */ "(Not Used)",
735 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
736 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
737 /* 24 */ "VMCALL with invalid SMM-monitor features.",
738 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
739 /* 26 */ "VM-entry with events blocked by MOV SS.",
740 /* 27 */ "(Not Used)",
741 /* 28 */ "Invalid operand to INVEPT/INVVPID."
742};
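/* Illustrative lookup (hedged sketch): diagnostic code reads
 * VMX_VMCS32_RO_VM_INSTR_ERROR and clamps the value before indexing, along the
 * lines of
 *     uint32_t const idxError = RT_MIN(uInstrError, HMVMX_INSTR_ERROR_MAX);
 *     Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxError]));
 * where uInstrError is a hypothetical local holding the VMCS value. */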
743#endif /* VBOX_STRICT && LOG_ENABLED */
744
745
746/**
747 * Gets the CR0 guest/host mask.
748 *
749 * These bits typically do not change during the lifetime of a VM. Any bit set in
750 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
751 * by the guest.
752 *
753 * @returns The CR0 guest/host mask.
754 * @param pVCpu The cross context virtual CPU structure.
755 */
756static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
757{
758 /*
759 * Guest modifications to CR0 bits that VT-x ignores when saving/restoring (CD, ET, NW),
760 * and to CR0 bits that we require for shadow paging (PG), must cause VM-exits.
761 *
762 * Furthermore, modifications to any bits that are reserved/unspecified currently
763 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
764 * when future CPUs specify and use currently reserved/unspecified bits.
765 */
766 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
767 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
768 * and @bugref{6944}. */
769 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
770 return ( X86_CR0_PE
771 | X86_CR0_NE
772 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
773 | X86_CR0_PG
774 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
775}
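/* Note (hedged): the mask returned above is typically written to
 * VMX_VMCS_CTRL_CR0_MASK, with the guest-visible values reflected through
 * VMX_VMCS_CTRL_CR0_READ_SHADOW, so that guest writes to host-owned CR0 bits
 * cause VM-exits. */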
776
777
778/**
779 * Gets the CR4 guest/host mask.
780 *
781 * These bits typically do not change during the lifetime of a VM. Any bit set in
782 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
783 * by the guest.
784 *
785 * @returns The CR4 guest/host mask.
786 * @param pVCpu The cross context virtual CPU structure.
787 */
788static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
789{
790 /*
791 * We construct a mask of all CR4 bits that the guest can modify without causing
792 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
793 * a VM-exit when the guest attempts to modify them when executing using
794 * hardware-assisted VMX.
795 *
796 * When a feature is not exposed to the guest (and may be present on the host),
797 * we want to intercept guest modifications to the bit so we can emulate proper
798 * behavior (e.g., #GP).
799 *
800 * Furthermore, only modifications to those bits that don't require immediate
801 * emulation are allowed. For example, PCIDE is excluded because its behavior
802 * depends on CR3, which might not always be the guest value while executing
803 * using hardware-assisted VMX.
804 */
805 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
806 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
807#ifdef IN_NEM_DARWIN
808 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
809#endif
810 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
811
812 /*
813 * Paranoia.
814 * Ensure features exposed to the guest are present on the host.
815 */
816 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
817#ifdef IN_NEM_DARWIN
818 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
819#endif
820 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
821
822 uint64_t const fGstMask = X86_CR4_PVI
823 | X86_CR4_TSD
824 | X86_CR4_DE
825 | X86_CR4_MCE
826 | X86_CR4_PCE
827 | X86_CR4_OSXMMEEXCPT
828 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
829#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
830 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
831 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
832#endif
833 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
834 return ~fGstMask;
835}
836
837
838/**
839 * Adds one or more exceptions to the exception bitmap and commits it to the current
840 * VMCS.
841 *
842 * @param pVCpu The cross context virtual CPU structure.
843 * @param pVmxTransient The VMX-transient structure.
844 * @param uXcptMask The exception(s) to add.
845 */
846static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
847{
848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
849 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
850 if ((uXcptBitmap & uXcptMask) != uXcptMask)
851 {
852 uXcptBitmap |= uXcptMask;
853 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
854 AssertRC(rc);
855 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
856 }
857}
858
859
860/**
861 * Adds an exception to the exception bitmap and commits it to the current VMCS.
862 *
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcpt The exception to add.
866 */
867static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
868{
869 Assert(uXcpt <= X86_XCPT_LAST);
870 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
871}
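/* Illustrative usage (hedged sketch): to start intercepting general-protection
 * faults on the current VMCS one would call, e.g.:
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 * and undo it later with vmxHCRemoveXcptIntercept() defined further below. */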
872
873
874/**
875 * Removes one or more exceptions from the exception bitmap and commits it to the
876 * current VMCS.
877 *
878 * This takes care of not removing the exception intercept if a nested-guest
879 * requires the exception to be intercepted.
880 *
881 * @returns VBox status code.
882 * @param pVCpu The cross context virtual CPU structure.
883 * @param pVmxTransient The VMX-transient structure.
884 * @param uXcptMask The exception(s) to remove.
885 */
886static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
887{
888 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
889 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
890 if (u32XcptBitmap & uXcptMask)
891 {
892#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
893 if (!pVmxTransient->fIsNestedGuest)
894 { /* likely */ }
895 else
896 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
897#endif
898#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
899 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
900 | RT_BIT(X86_XCPT_DE)
901 | RT_BIT(X86_XCPT_NM)
902 | RT_BIT(X86_XCPT_TS)
903 | RT_BIT(X86_XCPT_UD)
904 | RT_BIT(X86_XCPT_NP)
905 | RT_BIT(X86_XCPT_SS)
906 | RT_BIT(X86_XCPT_GP)
907 | RT_BIT(X86_XCPT_PF)
908 | RT_BIT(X86_XCPT_MF));
909#elif defined(HMVMX_ALWAYS_TRAP_PF)
910 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
911#endif
912 if (uXcptMask)
913 {
914 /* Validate we are not removing any essential exception intercepts. */
915#ifndef IN_NEM_DARWIN
916 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
917#else
918 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
919#endif
920 NOREF(pVCpu);
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
922 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
923
924 /* Remove it from the exception bitmap. */
925 u32XcptBitmap &= ~uXcptMask;
926
927 /* Commit and update the cache if necessary. */
928 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
929 {
930 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
931 AssertRC(rc);
932 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
933 }
934 }
935 }
936 return VINF_SUCCESS;
937}
938
939
940/**
941 * Removes an exception from the exception bitmap and commits it to the current
942 * VMCS.
943 *
944 * @returns VBox status code.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param pVmxTransient The VMX-transient structure.
947 * @param uXcpt The exception to remove.
948 */
949static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
950{
951 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
952}
953
954
955#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
956/**
957 * Loads the shadow VMCS specified by the VMCS info. object.
958 *
959 * @returns VBox status code.
960 * @param pVmcsInfo The VMCS info. object.
961 *
962 * @remarks Can be called with interrupts disabled.
963 */
964static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
965{
966 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
967 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
968
969 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
970 if (RT_SUCCESS(rc))
971 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
972 return rc;
973}
974
975
976/**
977 * Clears the shadow VMCS specified by the VMCS info. object.
978 *
979 * @returns VBox status code.
980 * @param pVmcsInfo The VMCS info. object.
981 *
982 * @remarks Can be called with interrupts disabled.
983 */
984static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
985{
986 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
987 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
988
989 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
990 if (RT_SUCCESS(rc))
991 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
992 return rc;
993}
994
995
996/**
997 * Switches from and to the specified VMCSes.
998 *
999 * @returns VBox status code.
1000 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1001 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1002 *
1003 * @remarks Called with interrupts disabled.
1004 */
1005static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1006{
1007 /*
1008 * Clear the VMCS we are switching out if it has not already been cleared.
1009 * This will sync any CPU internal data back to the VMCS.
1010 */
1011 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1012 {
1013 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1014 if (RT_SUCCESS(rc))
1015 {
1016 /*
1017 * The shadow VMCS, if any, would not be active at this point since we
1018 * would have cleared it while importing the virtual hardware-virtualization
1019 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1020 * clear the shadow VMCS here, just assert for safety.
1021 */
1022 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1023 }
1024 else
1025 return rc;
1026 }
1027
1028 /*
1029 * Clear the VMCS we are switching to if it has not already been cleared.
1030 * This will initialize the VMCS launch state to "clear" required for loading it.
1031 *
1032 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1033 */
1034 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1035 {
1036 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1037 if (RT_SUCCESS(rc))
1038 { /* likely */ }
1039 else
1040 return rc;
1041 }
1042
1043 /*
1044 * Finally, load the VMCS we are switching to.
1045 */
1046 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1047}
1048
1049
1050/**
1051 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1052 * caller.
1053 *
1054 * @returns VBox status code.
1055 * @param pVCpu The cross context virtual CPU structure.
1056 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1057 * true) or guest VMCS (pass false).
1058 */
1059static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1060{
1061 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1062 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1063
1064 PVMXVMCSINFO pVmcsInfoFrom;
1065 PVMXVMCSINFO pVmcsInfoTo;
1066 if (fSwitchToNstGstVmcs)
1067 {
1068 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1069 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1070 }
1071 else
1072 {
1073 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1074 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1075 }
1076
1077 /*
1078 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1079 * preemption hook code path acquires the current VMCS.
1080 */
1081 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1082
1083 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1084 if (RT_SUCCESS(rc))
1085 {
1086 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1087 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1088
1089 /*
1090 * If we are switching to a VMCS that was executed on a different host CPU or was
1091 * never executed before, flag that we need to export the host state before executing
1092 * guest/nested-guest code using hardware-assisted VMX.
1093 *
1094 * This could probably be done in a preemptible context since the preemption hook
1095 * will flag the necessary change in host context. However, since preemption is
1096 * already disabled and to avoid making assumptions about host specific code in
1097 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1098 * disabled.
1099 */
1100 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1101 { /* likely */ }
1102 else
1103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1104
1105 ASMSetFlags(fEFlags);
1106
1107 /*
1108 * We use different VM-exit MSR-store areas for the guest and the nested-guest. Hence,
1109 * flag that we need to update the host MSR values there. Even if we decide in the
1110 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1111 * if its content differs, we would have to update the host MSRs anyway.
1112 */
1113 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1114 }
1115 else
1116 ASMSetFlags(fEFlags);
1117 return rc;
1118}
1119#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1120
1121
1122#ifdef VBOX_STRICT
1123/**
1124 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1125 * transient structure.
1126 *
1127 * @param pVCpu The cross context virtual CPU structure.
1128 * @param pVmxTransient The VMX-transient structure.
1129 */
1130DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1131{
1132 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1133 AssertRC(rc);
1134}
1135
1136
1137/**
1138 * Reads the VM-entry exception error code field from the VMCS into
1139 * the VMX transient structure.
1140 *
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1147 AssertRC(rc);
1148}
1149
1150
1151/**
1152 * Reads the VM-entry instruction-length field from the VMCS into
1153 * the VMX transient structure.
1154 *
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param pVmxTransient The VMX-transient structure.
1157 */
1158DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1159{
1160 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1161 AssertRC(rc);
1162}
1163#endif /* VBOX_STRICT */
1164
1165
1166/**
1167 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1168 * transient structure.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure.
1171 * @param pVmxTransient The VMX-transient structure.
1172 */
1173DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1174{
1175 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1176 {
1177 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1178 AssertRC(rc);
1179 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1180 }
1181}
1182
1183
1184/**
1185 * Reads the VM-exit interruption error code from the VMCS into the VMX
1186 * transient structure.
1187 *
1188 * @param pVCpu The cross context virtual CPU structure.
1189 * @param pVmxTransient The VMX-transient structure.
1190 */
1191DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1192{
1193 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1194 {
1195 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1196 AssertRC(rc);
1197 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1198 }
1199}
1200
1201
1202/**
1203 * Reads the VM-exit instruction length field from the VMCS into the VMX
1204 * transient structure.
1205 *
1206 * @param pVCpu The cross context virtual CPU structure.
1207 * @param pVmxTransient The VMX-transient structure.
1208 */
1209DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1210{
1211 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1212 {
1213 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1214 AssertRC(rc);
1215 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1216 }
1217}
1218
1219
1220/**
1221 * Reads the VM-exit instruction-information field from the VMCS into
1222 * the VMX transient structure.
1223 *
1224 * @param pVCpu The cross context virtual CPU structure.
1225 * @param pVmxTransient The VMX-transient structure.
1226 */
1227DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1228{
1229 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1230 {
1231 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1232 AssertRC(rc);
1233 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1234 }
1235}
1236
1237
1238/**
1239 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1240 *
1241 * @param pVCpu The cross context virtual CPU structure.
1242 * @param pVmxTransient The VMX-transient structure.
1243 */
1244DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1245{
1246 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1247 {
1248 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1249 AssertRC(rc);
1250 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1251 }
1252}
1253
1254
1255/**
1256 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1257 *
1258 * @param pVCpu The cross context virtual CPU structure.
1259 * @param pVmxTransient The VMX-transient structure.
1260 */
1261DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1262{
1263 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1264 {
1265 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1266 AssertRC(rc);
1267 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1268 }
1269}
1270
1271
1272/**
1273 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1274 *
1275 * @param pVCpu The cross context virtual CPU structure.
1276 * @param pVmxTransient The VMX-transient structure.
1277 */
1278DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1279{
1280 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1281 {
1282 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1283 AssertRC(rc);
1284 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1285 }
1286}
1287
1288#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1289/**
1290 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1291 * structure.
1292 *
1293 * @param pVCpu The cross context virtual CPU structure.
1294 * @param pVmxTransient The VMX-transient structure.
1295 */
1296DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1297{
1298 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1299 {
1300 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1301 AssertRC(rc);
1302 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1303 }
1304}
1305#endif
1306
1307/**
1308 * Reads the IDT-vectoring information field from the VMCS into the VMX
1309 * transient structure.
1310 *
1311 * @param pVCpu The cross context virtual CPU structure.
1312 * @param pVmxTransient The VMX-transient structure.
1313 *
1314 * @remarks No-long-jump zone!!!
1315 */
1316DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1317{
1318 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1319 {
1320 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1321 AssertRC(rc);
1322 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1323 }
1324}
1325
1326
1327/**
1328 * Reads the IDT-vectoring error code from the VMCS into the VMX
1329 * transient structure.
1330 *
1331 * @param pVCpu The cross context virtual CPU structure.
1332 * @param pVmxTransient The VMX-transient structure.
1333 */
1334DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1335{
1336 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1337 {
1338 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1341 }
1342}
1343
1344#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1345/**
1346 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1347 *
1348 * @param pVCpu The cross context virtual CPU structure.
1349 * @param pVmxTransient The VMX-transient structure.
1350 */
1351static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1352{
1353 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1354 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1355 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1356 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1358 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1359 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1360 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1361 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1362 AssertRC(rc);
1363 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1364 | HMVMX_READ_EXIT_INSTR_LEN
1365 | HMVMX_READ_EXIT_INSTR_INFO
1366 | HMVMX_READ_IDT_VECTORING_INFO
1367 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1368 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1369 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1370 | HMVMX_READ_GUEST_LINEAR_ADDR
1371 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1372}
1373#endif
1374
1375/**
1376 * Verifies that our cached values of the VMCS fields are all consistent with
1377 * what's actually present in the VMCS.
1378 *
1379 * @returns VBox status code.
1380 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1381 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1382 * VMCS content. HMCPU error-field is
1383 * updated, see VMX_VCI_XXX.
1384 * @param pVCpu The cross context virtual CPU structure.
1385 * @param pVmcsInfo The VMCS info. object.
1386 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1387 */
1388static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1389{
1390 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1391
1392 uint32_t u32Val;
1393 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1394 AssertRC(rc);
1395 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1396 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1397 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1398 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1399
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1403 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406
1407 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1408 AssertRC(rc);
1409 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1410 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1411 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1412 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1413
1414 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1415 AssertRC(rc);
1416 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1417 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1418 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1419 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1420
1421 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1422 {
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1426 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429 }
1430
1431 uint64_t u64Val;
1432 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1433 {
1434 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1435 AssertRC(rc);
1436 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1437 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1438 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1439 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1440 }
1441
1442 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1443 AssertRC(rc);
1444 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1445 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1446 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1447 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1448
1449 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1450 AssertRC(rc);
1451 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1452 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1453 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1454 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1455
1456 NOREF(pcszVmcs);
1457 return VINF_SUCCESS;
1458}
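
/*
 * Illustrative sketch (not compiled): one way a strict-build caller could consume
 * vmxHCCheckCachedVmcsCtls() above. The wrapper name is hypothetical; the error
 * reporting mirrors the VERR_VMX_VMCS_FIELD_CACHE_INVALID / u32HMError contract
 * documented for the function.
 */
#if 0
static void vmxHCExampleVerifyVmcsCache(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
{
    int const rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, fIsNstGstVmcs);
    if (RT_FAILURE(rc))
    {
        /* u32HMError identifies which cached control diverged (VMX_VCI_XXX). */
        LogRel(("VMCS control cache mismatch: rc=%Rrc u32HMError=%#x\n", rc, VCPU_2_VMXSTATE(pVCpu).u32HMError));
    }
}
#endif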
1459
1460
1461/**
1462 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1463 * VMCS.
1464 *
1465 * This is typically required when the guest changes paging mode.
1466 *
1467 * @returns VBox status code.
1468 * @param pVCpu The cross context virtual CPU structure.
1469 * @param pVmxTransient The VMX-transient structure.
1470 *
1471 * @remarks Requires EFER.
1472 * @remarks No-long-jump zone!!!
1473 */
1474static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1475{
1476 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1477 {
1478 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1479 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1480
1481 /*
1482 * VM-entry controls.
1483 */
1484 {
1485 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1486 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1487
1488 /*
1489 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1490 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1491 *
1492 * For nested-guests, this is a mandatory VM-entry control. It's also
1493 * required because we do not want to leak host bits to the nested-guest.
1494 */
1495 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1496
1497 /*
1498 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1499 *
1500             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1501 * required to get the nested-guest working with hardware-assisted VMX execution.
1502 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1503 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1504 * here rather than while merging the guest VMCS controls.
1505 */
1506 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1507 {
1508 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1509 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1510 }
1511 else
1512 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1513
1514 /*
1515 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1516 *
1517             * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1518 * regardless of whether the nested-guest VMCS specifies it because we are free to
1519 * load whatever MSRs we require and we do not need to modify the guest visible copy
1520 * of the VM-entry MSR load area.
1521 */
1522 if ( g_fHmVmxSupportsVmcsEfer
1523#ifndef IN_NEM_DARWIN
1524 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1525#endif
1526 )
1527 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1528 else
1529 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1530
1531 /*
1532 * The following should -not- be set (since we're not in SMM mode):
1533 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1534 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1535 */
1536
1537 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1538 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1539
1540 if ((fVal & fZap) == fVal)
1541 { /* likely */ }
1542 else
1543 {
1544 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1545 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1546 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1547 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1548 }
1549
1550 /* Commit it to the VMCS. */
1551 if (pVmcsInfo->u32EntryCtls != fVal)
1552 {
1553 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1554 AssertRC(rc);
1555 pVmcsInfo->u32EntryCtls = fVal;
1556 }
1557 }
1558
1559 /*
1560 * VM-exit controls.
1561 */
1562 {
1563 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1564 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1565
1566 /*
1567 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1568 * supported the 1-setting of this bit.
1569 *
1570             * For nested-guests, we set "save debug controls" since the converse
1571             * "load debug controls" is mandatory for nested-guests anyway.
1572 */
1573 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1574
1575 /*
1576 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1577 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1578 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1579 * vmxHCExportHostMsrs().
1580 *
1581 * For nested-guests, we always set this bit as we do not support 32-bit
1582 * hosts.
1583 */
1584 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1585
1586#ifndef IN_NEM_DARWIN
1587 /*
1588 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1589 *
1590 * For nested-guests, we should use the "save IA32_EFER" control if we also
1591 * used the "load IA32_EFER" control while exporting VM-entry controls.
1592 */
1593 if ( g_fHmVmxSupportsVmcsEfer
1594 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1595 {
1596 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1597 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1598 }
1599#endif
1600
1601 /*
1602 * Enable saving of the VMX-preemption timer value on VM-exit.
1603 * For nested-guests, currently not exposed/used.
1604 */
1605 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1606 * the timer value. */
1607 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1608 {
1609 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1610 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1611 }
1612
1613 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1614 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1615
1616 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1617 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1618 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1619
1620 if ((fVal & fZap) == fVal)
1621 { /* likely */ }
1622 else
1623 {
1624 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1625 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1626 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1627 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1628 }
1629
1630 /* Commit it to the VMCS. */
1631 if (pVmcsInfo->u32ExitCtls != fVal)
1632 {
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1634 AssertRC(rc);
1635 pVmcsInfo->u32ExitCtls = fVal;
1636 }
1637 }
1638
1639 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1640 }
1641 return VINF_SUCCESS;
1642}
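
/*
 * Illustrative sketch (not compiled) of the allowed-0/allowed-1 pattern used above:
 * bits set in allowed0 are mandatory one-settings, bits clear in allowed1 are
 * forbidden, and a desired control value is acceptable iff (fVal & fZap) == fVal
 * once the mandatory bits have been OR-ed in. The helper name is hypothetical.
 */
#if 0
static bool vmxHCExampleIsCtlComboValid(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const fVal = fDesired | fAllowed0;     /* Force the mandatory one-settings. */
    uint32_t const fZap = fAllowed1;                /* Anything outside this mask must be zero. */
    return (fVal & fZap) == fVal;
}
#endif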
1643
1644
1645/**
1646 * Sets the TPR threshold in the VMCS.
1647 *
1648 * @param pVCpu The cross context virtual CPU structure.
1649 * @param pVmcsInfo The VMCS info. object.
1650 * @param u32TprThreshold The TPR threshold (task-priority class only).
1651 */
1652DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1653{
1654 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1655 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1656 RT_NOREF(pVmcsInfo);
1657 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1658 AssertRC(rc);
1659}
1660
1661
1662/**
1663 * Exports the guest APIC TPR state into the VMCS.
1664 *
1665 * @param pVCpu The cross context virtual CPU structure.
1666 * @param pVmxTransient The VMX-transient structure.
1667 *
1668 * @remarks No-long-jump zone!!!
1669 */
1670static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1671{
1672 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1673 {
1674 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1675
1676 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1677 if (!pVmxTransient->fIsNestedGuest)
1678 {
1679 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1680 && APICIsEnabled(pVCpu))
1681 {
1682 /*
1683 * Setup TPR shadowing.
1684 */
1685 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1686 {
1687 bool fPendingIntr = false;
1688 uint8_t u8Tpr = 0;
1689 uint8_t u8PendingIntr = 0;
1690 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1691 AssertRC(rc);
1692
1693 /*
1694 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1695 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1696 * priority of the pending interrupt so we can deliver the interrupt. If there
1697 * are no interrupts pending, set threshold to 0 to not cause any
1698 * TPR-below-threshold VM-exits.
1699 */
1700 uint32_t u32TprThreshold = 0;
1701 if (fPendingIntr)
1702 {
1703 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1704 (which is the Task-Priority Class). */
1705 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1706 const uint8_t u8TprPriority = u8Tpr >> 4;
1707 if (u8PendingPriority <= u8TprPriority)
1708 u32TprThreshold = u8PendingPriority;
1709 }
1710
1711 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1712 }
1713 }
1714 }
1715 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1716 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1717 }
1718}
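
/*
 * Illustrative sketch (not compiled) of the TPR-threshold computation above, with
 * made-up values: a pending interrupt vector 0x52 has priority class 5 (0x52 >> 4)
 * and a guest TPR of 0x71 has class 7, so the interrupt is masked and the threshold
 * is set to 5; once the guest lowers its TPR class below 5, a TPR-below-threshold
 * VM-exit lets us deliver the interrupt. The helper name is hypothetical.
 */
#if 0
static uint32_t vmxHCExampleTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    uint32_t u32TprThreshold = 0;
    if (fPendingIntr)
    {
        uint8_t const u8PendingPriority = u8PendingIntr >> 4;   /* Task-priority class of the pending interrupt. */
        uint8_t const u8TprPriority     = u8Tpr >> 4;           /* Task-priority class the guest programmed. */
        if (u8PendingPriority <= u8TprPriority)
            u32TprThreshold = u8PendingPriority;
    }
    return u32TprThreshold;
}
#endif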
1719
1720
1721/**
1722 * Gets the guest interruptibility-state and updates related force-flags.
1723 *
1724 * @returns Guest's interruptibility-state.
1725 * @param pVCpu The cross context virtual CPU structure.
1726 *
1727 * @remarks No-long-jump zone!!!
1728 */
1729static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1730{
1731 /*
1732 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1733 */
1734 uint32_t fIntrState = 0;
1735 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1736 {
1737 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1738 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1739
1740 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1741 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1742 {
1743 if (pCtx->eflags.Bits.u1IF)
1744 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1745 else
1746 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1747 }
1748 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1749 {
1750 /*
1751 * We can clear the inhibit force flag as even if we go back to the recompiler
1752 * without executing guest code in VT-x, the flag's condition to be cleared is
1753 * met and thus the cleared state is correct.
1754 */
1755 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1756 }
1757 }
1758
1759 /*
1760 * Check if we should inhibit NMI delivery.
1761 */
1762 if (CPUMIsGuestNmiBlocking(pVCpu))
1763 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1764
1765 /*
1766 * Validate.
1767 */
1768#ifdef VBOX_STRICT
1769    /* We don't support block-by-SMI yet. */
1770 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1771
1772 /* Block-by-STI must not be set when interrupts are disabled. */
1773 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1774 {
1775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1776 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1777 }
1778#endif
1779
1780 return fIntrState;
1781}
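
/*
 * Illustrative sketch (not compiled): the interruptibility-state returned above is
 * what ends up in the 32-bit guest interruptibility-state VMCS field. The field name
 * used here (VMX_VMCS32_GUEST_INT_STATE) is assumed from the VMX definitions; the
 * actual commit happens elsewhere in this template.
 */
#if 0
uint32_t const fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
int const rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
AssertRC(rc);
#endif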
1782
1783
1784/**
1785 * Exports the exception intercepts required for guest execution in the VMCS.
1786 *
1787 * @param pVCpu The cross context virtual CPU structure.
1788 * @param pVmxTransient The VMX-transient structure.
1789 *
1790 * @remarks No-long-jump zone!!!
1791 */
1792static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1793{
1794 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1795 {
1796 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1797 if ( !pVmxTransient->fIsNestedGuest
1798 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1799 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1800 else
1801 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1802
1803 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1804 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1805 }
1806}
1807
1808
1809/**
1810 * Exports the guest's RIP into the guest-state area in the VMCS.
1811 *
1812 * @param pVCpu The cross context virtual CPU structure.
1813 *
1814 * @remarks No-long-jump zone!!!
1815 */
1816static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1817{
1818 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1819 {
1820 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1821
1822 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1823 AssertRC(rc);
1824
1825 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1826 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1827 }
1828}
1829
1830
1831/**
1832 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1833 *
1834 * @param pVCpu The cross context virtual CPU structure.
1835 * @param pVmxTransient The VMX-transient structure.
1836 *
1837 * @remarks No-long-jump zone!!!
1838 */
1839static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1840{
1841 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1842 {
1843 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1844
1845 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1846 Let us assert it as such and use 32-bit VMWRITE. */
1847 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1848 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1849 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1850 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1851
1852#ifndef IN_NEM_DARWIN
1853 /*
1854 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1855 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1856 * can run the real-mode guest code under Virtual 8086 mode.
1857 */
1858 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1859 if (pVmcsInfo->RealMode.fRealOnV86Active)
1860 {
1861 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1862 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1863 Assert(!pVmxTransient->fIsNestedGuest);
1864 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1865 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1866 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1867 }
1868#else
1869 RT_NOREF(pVmxTransient);
1870#endif
1871
1872 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1873 AssertRC(rc);
1874
1875 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1876 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1877 }
1878}
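
/*
 * Illustrative sketch (not compiled) of the real-on-v86 RFLAGS mangling above, with
 * a made-up value: a real-mode guest eflags of 0x00000202 (IF set) is saved verbatim
 * for the VM-exit path, while the copy handed to VT-x gets VM=1 and IOPL=0, giving
 * 0x00020202, so the code runs under virtual-8086 mode and IOPL-sensitive
 * instructions fault as intended.
 */
#if 0
X86EFLAGS ExampleEFlags;
ExampleEFlags.u32         = 0x00000202;     /* What the real-mode guest sees. */
ExampleEFlags.Bits.u1VM   = 1;              /* Run under virtual-8086 mode. */
ExampleEFlags.Bits.u2IOPL = 0;              /* Make IOPL-sensitive instructions fault. */
/* ExampleEFlags.u32 is now 0x00020202, which is what would be written to the VMCS. */
#endif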
1879
1880
1881#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1882/**
1883 * Copies the nested-guest VMCS to the shadow VMCS.
1884 *
1885 * @returns VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure.
1887 * @param pVmcsInfo The VMCS info. object.
1888 *
1889 * @remarks No-long-jump zone!!!
1890 */
1891static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1892{
1893 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1894 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1895
1896 /*
1897 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1898 * current VMCS, as we may try saving guest lazy MSRs.
1899 *
1900 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1901 * calling the import VMCS code which is currently performing the guest MSR reads
1902 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1903 * and the rest of the VMX leave session machinery.
1904 */
1905 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1906
1907 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1908 if (RT_SUCCESS(rc))
1909 {
1910 /*
1911 * Copy all guest read/write VMCS fields.
1912 *
1913 * We don't check for VMWRITE failures here for performance reasons and
1914 * because they are not expected to fail, barring irrecoverable conditions
1915 * like hardware errors.
1916 */
1917 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1918 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1919 {
1920 uint64_t u64Val;
1921 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1922 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1923 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1924 }
1925
1926 /*
1927 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1928 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1929 */
1930 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1931 {
1932 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1933 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1934 {
1935 uint64_t u64Val;
1936 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1937 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1938 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1939 }
1940 }
1941
1942 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1943 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1944 }
1945
1946 ASMSetFlags(fEFlags);
1947 return rc;
1948}
1949
1950
1951/**
1952 * Copies the shadow VMCS to the nested-guest VMCS.
1953 *
1954 * @returns VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure.
1956 * @param pVmcsInfo The VMCS info. object.
1957 *
1958 * @remarks Called with interrupts disabled.
1959 */
1960static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1961{
1962 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1963 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1964 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1965
1966 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1967 if (RT_SUCCESS(rc))
1968 {
1969 /*
1970 * Copy guest read/write fields from the shadow VMCS.
1971 * Guest read-only fields cannot be modified, so no need to copy them.
1972 *
1973 * We don't check for VMREAD failures here for performance reasons and
1974 * because they are not expected to fail, barring irrecoverable conditions
1975 * like hardware errors.
1976 */
1977 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1978 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1979 {
1980 uint64_t u64Val;
1981 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1982 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1983 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1984 }
1985
1986 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1987 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1988 }
1989 return rc;
1990}
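
/*
 * Illustrative sketch (not compiled) of a single-field round trip through the shadow
 * VMCS, mirroring the copy loops above. The real code iterates the paShadowVmcsFields
 * table; the single field used here is only an example.
 */
#if 0
uint64_t u64Val;
uint32_t const uVmcsField   = VMX_VMCS_GUEST_RIP;                   /* Example field. */
PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);              /* Nested-guest VMCS -> value. */
VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);                       /* Value -> shadow VMCS. */
VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);                       /* Shadow VMCS -> value. */
IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);              /* Value -> nested-guest VMCS. */
#endif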
1991
1992
1993/**
1994 * Enables VMCS shadowing for the given VMCS info. object.
1995 *
1996 * @param pVCpu The cross context virtual CPU structure.
1997 * @param pVmcsInfo The VMCS info. object.
1998 *
1999 * @remarks No-long-jump zone!!!
2000 */
2001static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2002{
2003 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2004 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2005 {
2006 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2007 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2008 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2009 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2010 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2011 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2012 Log4Func(("Enabled\n"));
2013 }
2014}
2015
2016
2017/**
2018 * Disables VMCS shadowing for the given VMCS info. object.
2019 *
2020 * @param pVCpu The cross context virtual CPU structure.
2021 * @param pVmcsInfo The VMCS info. object.
2022 *
2023 * @remarks No-long-jump zone!!!
2024 */
2025static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2026{
2027 /*
2028 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2029 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2030 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2031 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2032 *
2033 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2034 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2035 */
2036 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2037 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2038 {
2039 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2040 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2041 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2042 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2043 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2044 Log4Func(("Disabled\n"));
2045 }
2046}
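
/*
 * Illustrative sketch (not compiled): toggling VMCS shadowing with the helpers above.
 * The call sites are hypothetical; the real code drives this from the nested-guest
 * VMCS merge and cleanup paths.
 */
#if 0
vmxHCEnableVmcsShadowing(pVCpu, pVmcsInfo);     /* Guest VMREAD/VMWRITE of shadowed fields no longer VM-exit. */
/* ... run the nested-guest ... */
vmxHCDisableVmcsShadowing(pVCpu, pVmcsInfo);    /* Back to intercepting VMREAD/VMWRITE; link pointer reset to NIL. */
#endif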
2047#endif
2048
2049
2050/**
2051 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2052 *
2053 * The guest FPU state is always pre-loaded hence we don't need to bother about
2054 * sharing FPU related CR0 bits between the guest and host.
2055 *
2056 * @returns VBox status code.
2057 * @param pVCpu The cross context virtual CPU structure.
2058 * @param pVmxTransient The VMX-transient structure.
2059 *
2060 * @remarks No-long-jump zone!!!
2061 */
2062static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2063{
2064 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2065 {
2066 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2067 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2068
2069 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2070 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2071 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2072 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2073 else
2074 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2075
2076 if (!pVmxTransient->fIsNestedGuest)
2077 {
2078 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2079 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2080 uint64_t const u64ShadowCr0 = u64GuestCr0;
2081 Assert(!RT_HI_U32(u64GuestCr0));
2082
2083 /*
2084 * Setup VT-x's view of the guest CR0.
2085 */
2086 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2087 if (VM_IS_VMX_NESTED_PAGING(pVM))
2088 {
2089#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2090 if (CPUMIsGuestPagingEnabled(pVCpu))
2091 {
2092 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2093 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2094 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2095 }
2096 else
2097 {
2098 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2099 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2100 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2101 }
2102
2103 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2104 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2105 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2106#endif
2107 }
2108 else
2109 {
2110 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2111 u64GuestCr0 |= X86_CR0_WP;
2112 }
2113
2114 /*
2115 * Guest FPU bits.
2116 *
2117 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2118 * using CR0.TS.
2119 *
2120             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2121             * set on the first CPUs to support VT-x, and makes no mention of relaxing this for UX in the VM-entry checks.
2122 */
2123 u64GuestCr0 |= X86_CR0_NE;
2124
2125 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2126 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2127
2128 /*
2129 * Update exception intercepts.
2130 */
2131 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2132#ifndef IN_NEM_DARWIN
2133 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2134 {
2135 Assert(PDMVmmDevHeapIsEnabled(pVM));
2136 Assert(pVM->hm.s.vmx.pRealModeTSS);
2137 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2138 }
2139 else
2140#endif
2141 {
2142 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2143 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2144 if (fInterceptMF)
2145 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2146 }
2147
2148 /* Additional intercepts for debugging, define these yourself explicitly. */
2149#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2150 uXcptBitmap |= 0
2151 | RT_BIT(X86_XCPT_BP)
2152 | RT_BIT(X86_XCPT_DE)
2153 | RT_BIT(X86_XCPT_NM)
2154 | RT_BIT(X86_XCPT_TS)
2155 | RT_BIT(X86_XCPT_UD)
2156 | RT_BIT(X86_XCPT_NP)
2157 | RT_BIT(X86_XCPT_SS)
2158 | RT_BIT(X86_XCPT_GP)
2159 | RT_BIT(X86_XCPT_PF)
2160 | RT_BIT(X86_XCPT_MF)
2161 ;
2162#elif defined(HMVMX_ALWAYS_TRAP_PF)
2163 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2164#endif
2165 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2166 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2167 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2168 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2169 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2170
2171 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2172 u64GuestCr0 |= fSetCr0;
2173 u64GuestCr0 &= fZapCr0;
2174 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2175
2176 /* Commit the CR0 and related fields to the guest VMCS. */
2177 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2178 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2179 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2180 {
2181 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2182 AssertRC(rc);
2183 }
2184 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2185 {
2186 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2187 AssertRC(rc);
2188 }
2189
2190 /* Update our caches. */
2191 pVmcsInfo->u32ProcCtls = uProcCtls;
2192 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2193
2194 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2195 }
2196 else
2197 {
2198 /*
2199 * With nested-guests, we may have extended the guest/host mask here since we
2200 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2201 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2202 * originally supplied. We must copy those bits from the nested-guest CR0 into
2203 * the nested-guest CR0 read-shadow.
2204 */
2205 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2206 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2207 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2208 Assert(!RT_HI_U32(u64GuestCr0));
2209 Assert(u64GuestCr0 & X86_CR0_NE);
2210
2211 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2212 u64GuestCr0 |= fSetCr0;
2213 u64GuestCr0 &= fZapCr0;
2214 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2215
2216 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2217 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2218 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2219
2220 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2221 }
2222
2223 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2224 }
2225
2226 return VINF_SUCCESS;
2227}
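
/*
 * Illustrative sketch (not compiled) of the CR0 fixed-bits application above, with
 * example MSR values: with CR0 fixed0 = 0x80000021 (PG, NE, PE mandatory) and
 * fixed1 = 0xffffffff, a guest CR0 of 0x00000011 becomes 0x80000031 in the VMCS
 * (PE/NE/PG forced on, CD/NW cleared to keep caching enabled), while the unmodified
 * value is kept as the CR0 read shadow so the guest reads back what it wrote.
 */
#if 0
uint64_t const fSetCr0      = UINT64_C(0x80000021);     /* Example allowed-0 (mandatory one) bits. */
uint64_t const fZapCr0      = UINT64_C(0xffffffff);     /* Example allowed-1 mask. */
uint64_t       u64GuestCr0  = UINT64_C(0x00000011);     /* What the guest wrote. */
uint64_t const u64ShadowCr0 = u64GuestCr0;              /* Read shadow keeps the guest's view. */
u64GuestCr0 |= fSetCr0;
u64GuestCr0 &= fZapCr0;
u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
/* u64GuestCr0 == 0x80000031 here. */
#endif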
2228
2229
2230/**
2231 * Exports the guest control registers (CR3, CR4) into the guest-state area
2232 * in the VMCS.
2233 *
2234 * @returns VBox strict status code.
2235 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2236 * without unrestricted guest access and the VMMDev is not presently
2237 * mapped (e.g. EFI32).
2238 *
2239 * @param pVCpu The cross context virtual CPU structure.
2240 * @param pVmxTransient The VMX-transient structure.
2241 *
2242 * @remarks No-long-jump zone!!!
2243 */
2244static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2245{
2246 int rc = VINF_SUCCESS;
2247 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2248
2249 /*
2250 * Guest CR2.
2251 * It's always loaded in the assembler code. Nothing to do here.
2252 */
2253
2254 /*
2255 * Guest CR3.
2256 */
2257 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2258 {
2259 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2260
2261 if (VM_IS_VMX_NESTED_PAGING(pVM))
2262 {
2263#ifndef IN_NEM_DARWIN
2264 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2265 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2266
2267 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2268 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2269 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2270 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2271
2272 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2273 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2274 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2275
2276 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2277 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2278 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2279 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2280 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2281 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2282 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2283
2284 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2285 AssertRC(rc);
2286#endif
2287
2288 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2289 uint64_t u64GuestCr3 = pCtx->cr3;
2290 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2291 || CPUMIsGuestPagingEnabledEx(pCtx))
2292 {
2293 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2294 if (CPUMIsGuestInPAEModeEx(pCtx))
2295 {
2296 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2297 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2298 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2299 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2300 }
2301
2302 /*
2303 * The guest's view of its CR3 is unblemished with nested paging when the
2304 * guest is using paging or we have unrestricted guest execution to handle
2305 * the guest when it's not using paging.
2306 */
2307 }
2308#ifndef IN_NEM_DARWIN
2309 else
2310 {
2311 /*
2312 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2313 * thinks it accesses physical memory directly, we use our identity-mapped
2314 * page table to map guest-linear to guest-physical addresses. EPT takes care
2315 * of translating it to host-physical addresses.
2316 */
2317 RTGCPHYS GCPhys;
2318 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2319
2320 /* We obtain it here every time as the guest could have relocated this PCI region. */
2321 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2322 if (RT_SUCCESS(rc))
2323 { /* likely */ }
2324 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2325 {
2326 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2327 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2328 }
2329 else
2330 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2331
2332 u64GuestCr3 = GCPhys;
2333 }
2334#endif
2335
2336 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2337 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2338 AssertRC(rc);
2339 }
2340 else
2341 {
2342 Assert(!pVmxTransient->fIsNestedGuest);
2343 /* Non-nested paging case, just use the hypervisor's CR3. */
2344 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2345
2346 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2347 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2348 AssertRC(rc);
2349 }
2350
2351 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2352 }
2353
2354 /*
2355 * Guest CR4.
2356 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2357 */
2358 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2359 {
2360 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2361 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2362
2363 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2364 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2365
2366 /*
2367 * With nested-guests, we may have extended the guest/host mask here (since we
2368 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2369 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2370 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2371 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2372 */
2373 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2374 uint64_t u64GuestCr4 = pCtx->cr4;
2375 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2376 ? pCtx->cr4
2377 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2378 Assert(!RT_HI_U32(u64GuestCr4));
2379
2380#ifndef IN_NEM_DARWIN
2381 /*
2382 * Setup VT-x's view of the guest CR4.
2383 *
2384 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2385 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2386         * redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
2387 *
2388 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2389 */
2390 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2391 {
2392 Assert(pVM->hm.s.vmx.pRealModeTSS);
2393 Assert(PDMVmmDevHeapIsEnabled(pVM));
2394 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2395 }
2396#endif
2397
2398 if (VM_IS_VMX_NESTED_PAGING(pVM))
2399 {
2400 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2401 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2402 {
2403 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2404 u64GuestCr4 |= X86_CR4_PSE;
2405 /* Our identity mapping is a 32-bit page directory. */
2406 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2407 }
2408 /* else use guest CR4.*/
2409 }
2410 else
2411 {
2412 Assert(!pVmxTransient->fIsNestedGuest);
2413
2414 /*
2415 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2416 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2417 */
2418 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2419 {
2420 case PGMMODE_REAL: /* Real-mode. */
2421 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2422 case PGMMODE_32_BIT: /* 32-bit paging. */
2423 {
2424 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2425 break;
2426 }
2427
2428 case PGMMODE_PAE: /* PAE paging. */
2429 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2430 {
2431 u64GuestCr4 |= X86_CR4_PAE;
2432 break;
2433 }
2434
2435 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2436 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2437 {
2438#ifdef VBOX_WITH_64_BITS_GUESTS
2439 /* For our assumption in vmxHCShouldSwapEferMsr. */
2440 Assert(u64GuestCr4 & X86_CR4_PAE);
2441 break;
2442#endif
2443 }
2444 default:
2445 AssertFailed();
2446 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2447 }
2448 }
2449
2450 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2451 u64GuestCr4 |= fSetCr4;
2452 u64GuestCr4 &= fZapCr4;
2453
2454 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2455 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2456 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2457
2458#ifndef IN_NEM_DARWIN
2459 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2460 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2461 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2462 {
2463 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2464 hmR0VmxUpdateStartVmFunction(pVCpu);
2465 }
2466#endif
2467
2468 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2469
2470 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2471 }
2472 return rc;
2473}
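
/*
 * Illustrative sketch (not compiled) of the EPTP construction done above for the
 * nested-paging case: the EPT PML4 physical address is combined with the write-back
 * memory type and a page-walk length of 4 (encoded as 3). The address is made up.
 */
#if 0
RTHCPHYS const HCPhysEptPml4 = UINT64_C(0x0000000123456000);    /* Example 4K-aligned EPT PML4 address. */
uint64_t uEptp = HCPhysEptPml4
               | RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
               | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
Assert(((uEptp >> 3) & 0x07) == 3);                             /* Page-walk length minus 1. */
int const rcEptp = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, uEptp);
AssertRC(rcEptp);
#endif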
2474
2475
2476#ifdef VBOX_STRICT
2477/**
2478 * Strict function to validate segment registers.
2479 *
2480 * @param pVCpu The cross context virtual CPU structure.
2481 * @param pVmcsInfo The VMCS info. object.
2482 *
2483 * @remarks Will import guest CR0 on strict builds during validation of
2484 * segments.
2485 */
2486static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2487{
2488 /*
2489 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2490 *
2491 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2492 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2493 * unusable bit and doesn't change the guest-context value.
2494 */
2495 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2496 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2497 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2498 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2499 && ( !CPUMIsGuestInRealModeEx(pCtx)
2500 && !CPUMIsGuestInV86ModeEx(pCtx)))
2501 {
2502 /* Protected mode checks */
2503 /* CS */
2504 Assert(pCtx->cs.Attr.n.u1Present);
2505 Assert(!(pCtx->cs.Attr.u & 0xf00));
2506 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2507 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2508 || !(pCtx->cs.Attr.n.u1Granularity));
2509 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2510 || (pCtx->cs.Attr.n.u1Granularity));
2511 /* CS cannot be loaded with NULL in protected mode. */
2512 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2513 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2514 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2515 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2516 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2517 else
2518            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2519 /* SS */
2520 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2521 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2522 if ( !(pCtx->cr0 & X86_CR0_PE)
2523 || pCtx->cs.Attr.n.u4Type == 3)
2524 {
2525 Assert(!pCtx->ss.Attr.n.u2Dpl);
2526 }
2527 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2528 {
2529 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2530 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2531 Assert(pCtx->ss.Attr.n.u1Present);
2532 Assert(!(pCtx->ss.Attr.u & 0xf00));
2533 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2534 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2535 || !(pCtx->ss.Attr.n.u1Granularity));
2536 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2537 || (pCtx->ss.Attr.n.u1Granularity));
2538 }
2539 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2540 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2541 {
2542 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2543 Assert(pCtx->ds.Attr.n.u1Present);
2544 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2545 Assert(!(pCtx->ds.Attr.u & 0xf00));
2546 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2547 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2548 || !(pCtx->ds.Attr.n.u1Granularity));
2549 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2550 || (pCtx->ds.Attr.n.u1Granularity));
2551 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2552 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2553 }
2554 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2555 {
2556 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2557 Assert(pCtx->es.Attr.n.u1Present);
2558 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2559 Assert(!(pCtx->es.Attr.u & 0xf00));
2560 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2561 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2562 || !(pCtx->es.Attr.n.u1Granularity));
2563 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2564 || (pCtx->es.Attr.n.u1Granularity));
2565 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2566 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2567 }
2568 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2569 {
2570 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2571 Assert(pCtx->fs.Attr.n.u1Present);
2572 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2573 Assert(!(pCtx->fs.Attr.u & 0xf00));
2574 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2575 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2576 || !(pCtx->fs.Attr.n.u1Granularity));
2577 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2578 || (pCtx->fs.Attr.n.u1Granularity));
2579 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2580 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2581 }
2582 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2583 {
2584 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2585 Assert(pCtx->gs.Attr.n.u1Present);
2586 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2587 Assert(!(pCtx->gs.Attr.u & 0xf00));
2588 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2589 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2590 || !(pCtx->gs.Attr.n.u1Granularity));
2591 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2592 || (pCtx->gs.Attr.n.u1Granularity));
2593 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2594 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2595 }
2596 /* 64-bit capable CPUs. */
2597 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2598 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2599 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2600 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2601 }
2602 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2603 || ( CPUMIsGuestInRealModeEx(pCtx)
2604 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2605 {
2606 /* Real and v86 mode checks. */
2607        /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2608 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2609#ifndef IN_NEM_DARWIN
2610 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2611 {
2612 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2613 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2614 }
2615 else
2616#endif
2617 {
2618 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2619 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2620 }
2621
2622 /* CS */
2623 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2624 Assert(pCtx->cs.u32Limit == 0xffff);
2625 Assert(u32CSAttr == 0xf3);
2626 /* SS */
2627 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2628 Assert(pCtx->ss.u32Limit == 0xffff);
2629 Assert(u32SSAttr == 0xf3);
2630 /* DS */
2631 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2632 Assert(pCtx->ds.u32Limit == 0xffff);
2633 Assert(u32DSAttr == 0xf3);
2634 /* ES */
2635 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2636 Assert(pCtx->es.u32Limit == 0xffff);
2637 Assert(u32ESAttr == 0xf3);
2638 /* FS */
2639 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2640 Assert(pCtx->fs.u32Limit == 0xffff);
2641 Assert(u32FSAttr == 0xf3);
2642 /* GS */
2643 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2644 Assert(pCtx->gs.u32Limit == 0xffff);
2645 Assert(u32GSAttr == 0xf3);
2646 /* 64-bit capable CPUs. */
2647 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2648 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2649 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2650 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2651 }
2652}
2653#endif /* VBOX_STRICT */
2654
2655
2656/**
2657 * Exports a guest segment register into the guest-state area in the VMCS.
2658 *
2659 * @returns VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure.
2661 * @param pVmcsInfo The VMCS info. object.
2662 * @param iSegReg The segment register number (X86_SREG_XXX).
2663 * @param pSelReg Pointer to the segment selector.
2664 *
2665 * @remarks No-long-jump zone!!!
2666 */
2667static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2668{
2669 Assert(iSegReg < X86_SREG_COUNT);
2670
2671 uint32_t u32Access = pSelReg->Attr.u;
2672#ifndef IN_NEM_DARWIN
2673 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2674#endif
2675 {
2676 /*
2677 * The way to differentiate between whether this is really a null selector or was just
2678 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2679 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2680         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2681 * NULL selectors loaded in protected-mode have their attribute as 0.
2682 */
2683 if (u32Access)
2684 { }
2685 else
2686 u32Access = X86DESCATTR_UNUSABLE;
2687 }
2688#ifndef IN_NEM_DARWIN
2689 else
2690 {
2691 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2692 u32Access = 0xf3;
2693 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2694 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2695 RT_NOREF_PV(pVCpu);
2696 }
2697#else
2698 RT_NOREF(pVmcsInfo);
2699#endif
2700
2701 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2702 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2703              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2704
2705 /*
2706 * Commit it to the VMCS.
2707 */
2708 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2709 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2710 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2711 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2712 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2713 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2714 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2715 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2716 return VINF_SUCCESS;
2717}
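
/*
 * Illustrative sketch (not compiled): exporting a NULL selector with the function
 * above. A selector register with all attribute bits zero is converted to the VT-x
 * "unusable" encoding rather than being written with access rights of 0. The local
 * variable is hypothetical.
 */
#if 0
CPUMSELREG ExampleSelReg;
RT_ZERO(ExampleSelReg);                                         /* Sel=0, Attr.u=0 -> treated as unusable. */
vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &ExampleSelReg);
/* The ES access-rights field in the VMCS now reads X86DESCATTR_UNUSABLE. */
#endif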
2718
2719
2720/**
2721 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2722 * area in the VMCS.
2723 *
2724 * @returns VBox status code.
2725 * @param pVCpu The cross context virtual CPU structure.
2726 * @param pVmxTransient The VMX-transient structure.
2727 *
2728 * @remarks Will import guest CR0 on strict builds during validation of
2729 * segments.
2730 * @remarks No-long-jump zone!!!
2731 */
2732static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2733{
2734 int rc = VERR_INTERNAL_ERROR_5;
2735#ifndef IN_NEM_DARWIN
2736 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2737#endif
2738 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2739 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2740#ifndef IN_NEM_DARWIN
2741 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2742#endif
2743
2744 /*
2745 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2746 */
2747 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2748 {
2749 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2750 {
2751 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2752#ifndef IN_NEM_DARWIN
2753 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2754 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2755#endif
2756 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2757 AssertRC(rc);
2758 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2759 }
2760
2761 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2762 {
2763 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2764#ifndef IN_NEM_DARWIN
2765 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2766 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2767#endif
2768 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2769 AssertRC(rc);
2770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2771 }
2772
2773 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2774 {
2775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2776#ifndef IN_NEM_DARWIN
2777 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2778 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2779#endif
2780 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2781 AssertRC(rc);
2782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2783 }
2784
2785 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2786 {
2787 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2788#ifndef IN_NEM_DARWIN
2789 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2790 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2791#endif
2792 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2793 AssertRC(rc);
2794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2795 }
2796
2797 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2798 {
2799 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2800#ifndef IN_NEM_DARWIN
2801 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2802 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2803#endif
2804 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2805 AssertRC(rc);
2806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2807 }
2808
2809 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2810 {
2811 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2812#ifndef IN_NEM_DARWIN
2813 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2814 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2815#endif
2816 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2817 AssertRC(rc);
2818 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2819 }
2820
2821#ifdef VBOX_STRICT
2822 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2823#endif
2824 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2825 pCtx->cs.Attr.u));
2826 }
2827
2828 /*
2829 * Guest TR.
2830 */
2831 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2832 {
2833 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2834
2835 /*
2836 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2837 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2838 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2839 */
2840 uint16_t u16Sel;
2841 uint32_t u32Limit;
2842 uint64_t u64Base;
2843 uint32_t u32AccessRights;
2844#ifndef IN_NEM_DARWIN
2845 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2846#endif
2847 {
2848 u16Sel = pCtx->tr.Sel;
2849 u32Limit = pCtx->tr.u32Limit;
2850 u64Base = pCtx->tr.u64Base;
2851 u32AccessRights = pCtx->tr.Attr.u;
2852 }
2853#ifndef IN_NEM_DARWIN
2854 else
2855 {
2856 Assert(!pVmxTransient->fIsNestedGuest);
2857 Assert(pVM->hm.s.vmx.pRealModeTSS);
2858 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2859
2860 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2861 RTGCPHYS GCPhys;
2862 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2863 AssertRCReturn(rc, rc);
2864
2865 X86DESCATTR DescAttr;
2866 DescAttr.u = 0;
2867 DescAttr.n.u1Present = 1;
2868 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2869
2870 u16Sel = 0;
2871 u32Limit = HM_VTX_TSS_SIZE;
2872 u64Base = GCPhys;
2873 u32AccessRights = DescAttr.u;
2874 }
2875#endif
2876
2877 /* Validate. */
2878 Assert(!(u16Sel & RT_BIT(2)));
2879 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2880 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2881 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2882 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2883 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2884 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2885 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2886 Assert( (u32Limit & 0xfff) == 0xfff
2887 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2888 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2889 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2890
2891 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2892 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2893 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2895
2896 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2897 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2898 }
2899
2900 /*
2901 * Guest GDTR.
2902 */
2903 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2904 {
2905 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2906
2907 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2908 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2909
2910 /* Validate. */
2911 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2914 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2915 }
2916
2917 /*
2918 * Guest LDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2923
2924 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2925 uint32_t u32Access;
2926 if ( !pVmxTransient->fIsNestedGuest
2927 && !pCtx->ldtr.Attr.u)
2928 u32Access = X86DESCATTR_UNUSABLE;
2929 else
2930 u32Access = pCtx->ldtr.Attr.u;
2931
2932 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2935 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2936
2937 /* Validate. */
2938 if (!(u32Access & X86DESCATTR_UNUSABLE))
2939 {
2940 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2941 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2942 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2943 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2944 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2945 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2946 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2947 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2948 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2949 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2950 }
2951
2952 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2953 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2954 }
2955
2956 /*
2957 * Guest IDTR.
2958 */
2959 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2960 {
2961 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2962
2963 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2964 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2965
2966 /* Validate. */
2967 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2968
2969 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2970 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2971 }
2972
2973 return VINF_SUCCESS;
2974}
2975
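/*
 * Illustrative sketch (not part of the original source): code that has modified the
 * guest's descriptor-table state marks it as dirty so the export code above rewrites
 * the corresponding VMCS fields before the next VM-entry.  For example:
 *
 *     ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
 *                      HM_CHANGED_GUEST_GDTR | HM_CHANGED_GUEST_IDTR);
 */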
2976
2977/**
2978 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2979 * VM-exit interruption info type.
2980 *
2981 * @returns The IEM exception flags.
2982 * @param uVector The event vector.
2983 * @param uVmxEventType The VMX event type.
2984 *
2985 * @remarks This function currently only constructs flags required for
2986 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
2987 * and CR2 aspects of an exception are not included).
2988 */
2989static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2990{
2991 uint32_t fIemXcptFlags;
2992 switch (uVmxEventType)
2993 {
2994 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2995 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2996 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2997 break;
2998
2999 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3000 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3001 break;
3002
3003 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3004 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3005 break;
3006
3007 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3008 {
3009 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3010 if (uVector == X86_XCPT_BP)
3011 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3012 else if (uVector == X86_XCPT_OF)
3013 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3014 else
3015 {
3016 fIemXcptFlags = 0;
3017 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3018 }
3019 break;
3020 }
3021
3022 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3023 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3024 break;
3025
3026 default:
3027 fIemXcptFlags = 0;
3028 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3029 break;
3030 }
3031 return fIemXcptFlags;
3032}
3033
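/*
 * Illustrative sketch (not part of the original source): how a caller that has read
 * the IDT-vectoring information of a VM-exit might feed it to the helper above.  The
 * variable uIdtVectorInfo stands in for the value already read from the VMCS.
 *
 *     uint32_t const uVector       = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
 *     uint32_t const uType         = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
 *     uint32_t const fIdtXcptFlags = vmxHCGetIemXcptFlags(uVector, uType);
 */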
3034
3035/**
3036 * Sets an event as a pending event to be injected into the guest.
3037 *
3038 * @param pVCpu The cross context virtual CPU structure.
3039 * @param u32IntInfo The VM-entry interruption-information field.
3040 * @param cbInstr The VM-entry instruction length in bytes (for
3041 * software interrupts, exceptions and privileged
3042 * software exceptions).
3043 * @param u32ErrCode The VM-entry exception error code.
3044 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3045 * page-fault.
3046 */
3047DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3048 RTGCUINTPTR GCPtrFaultAddress)
3049{
3050 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3051 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3052 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3053 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3054 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3055 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3056}
3057
3058
3059/**
3060 * Sets an external interrupt as pending-for-injection into the VM.
3061 *
3062 * @param pVCpu The cross context virtual CPU structure.
3063 * @param u8Interrupt The external interrupt vector.
3064 */
3065DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105/**
3106 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3107 *
3108 * @param pVCpu The cross context virtual CPU structure.
3109 */
3110DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3111{
3112 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3113 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3114 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3116 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3117}
3118
3119
3120/**
3121 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3122 *
3123 * @param pVCpu The cross context virtual CPU structure.
3124 */
3125DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3126{
3127 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3129 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3130 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3131 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3132}
3133
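/*
 * Illustrative sketch (not part of the original source): a page-fault (#PF) would be
 * queued the same way as the wrappers above, but supplying both the error code and
 * the fault address (CR2).  uErrCode and GCPtrFaultAddress are caller-supplied
 * placeholders here.
 *
 *     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0, uErrCode, GCPtrFaultAddress);
 */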
3134
3135#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3136/**
3137 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3138 *
3139 * @param pVCpu The cross context virtual CPU structure.
3140 * @param u32ErrCode The error code for the general-protection exception.
3141 */
3142DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3143{
3144 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3145 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3146 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3148 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3149}
3150
3151
3152/**
3153 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3154 *
3155 * @param pVCpu The cross context virtual CPU structure.
3156 * @param u32ErrCode The error code for the stack exception.
3157 */
3158DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3159{
3160 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3161 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3162 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3163 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3164 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3165}
3166#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3167
3168
3169/**
3170 * Fixes up attributes for the specified segment register.
3171 *
3172 * @param pVCpu The cross context virtual CPU structure.
3173 * @param pSelReg The segment register that needs fixing.
3174 * @param pszRegName The register name (for logging and assertions).
3175 */
3176static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3177{
3178 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3179
3180 /*
3181 * If VT-x marks the segment as unusable, most other bits remain undefined:
3182 * - For CS the L, D and G bits have meaning.
3183 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3184 * - For the remaining data segments no bits are defined.
3185 *
3186     * The present bit and the unusable bit have been observed to be set at the
3187 * same time (the selector was supposed to be invalid as we started executing
3188 * a V8086 interrupt in ring-0).
3189 *
3190 * What should be important for the rest of the VBox code, is that the P bit is
3191 * cleared. Some of the other VBox code recognizes the unusable bit, but
3192     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3193 * safe side here, we'll strip off P and other bits we don't care about. If
3194 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3195 *
3196 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3197 */
3198#ifdef VBOX_STRICT
3199 uint32_t const uAttr = pSelReg->Attr.u;
3200#endif
3201
3202 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3203 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3204 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3205
3206#ifdef VBOX_STRICT
3207# ifndef IN_NEM_DARWIN
3208 VMMRZCallRing3Disable(pVCpu);
3209# endif
3210 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3211# ifdef DEBUG_bird
3212 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3213 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3214 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3215# endif
3216# ifndef IN_NEM_DARWIN
3217 VMMRZCallRing3Enable(pVCpu);
3218# endif
3219 NOREF(uAttr);
3220#endif
3221 RT_NOREF2(pVCpu, pszRegName);
3222}
3223
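/*
 * Illustrative sketch (not part of the original source): a contrived before/after of
 * the masking done above (the present bit is stripped, G survives).  TmpSel is a
 * local placeholder.
 *
 *     CPUMSELREG TmpSel;
 *     RT_ZERO(TmpSel);
 *     TmpSel.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_G;
 *     vmxHCFixUnusableSegRegAttr(pVCpu, &TmpSel, "TmpSel");
 *     Assert(TmpSel.Attr.u == (X86DESCATTR_UNUSABLE | X86DESCATTR_G));
 */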
3224
3225/**
3226 * Imports a guest segment register from the current VMCS into the guest-CPU
3227 * context.
3228 *
3229 * @param pVCpu The cross context virtual CPU structure.
3230 * @param iSegReg The segment register number (X86_SREG_XXX).
3231 *
3232 * @remarks Called with interrupts and/or preemption disabled.
3233 */
3234static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3235{
3236 Assert(iSegReg < X86_SREG_COUNT);
3237 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3238 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3239 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3240 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3241
3242 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3243
3244 uint16_t u16Sel;
3245 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3246 pSelReg->Sel = u16Sel;
3247 pSelReg->ValidSel = u16Sel;
3248
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3251
3252 uint32_t u32Attr;
3253 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3254 pSelReg->Attr.u = u32Attr;
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3257
3258 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3259}
3260
3261
3262/**
3263 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3264 *
3265 * @param pVCpu The cross context virtual CPU structure.
3266 *
3267 * @remarks Called with interrupts and/or preemption disabled.
3268 */
3269static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3270{
3271 uint16_t u16Sel;
3272 uint64_t u64Base;
3273 uint32_t u32Limit, u32Attr;
3274 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3275 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3276 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3278
3279 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3280 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3281 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3282 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3283 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3284 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3285 if (u32Attr & X86DESCATTR_UNUSABLE)
3286 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3287}
3288
3289
3290/**
3291 * Imports the guest TR from the current VMCS into the guest-CPU context.
3292 *
3293 * @param pVCpu The cross context virtual CPU structure.
3294 *
3295 * @remarks Called with interrupts and/or preemption disabled.
3296 */
3297static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3298{
3299 uint16_t u16Sel;
3300 uint64_t u64Base;
3301 uint32_t u32Limit, u32Attr;
3302 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3303 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3305 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3306
3307 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3308 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3309 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3310 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3311 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3312 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3313 /* TR is the only selector that can never be unusable. */
3314 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 uint64_t u64Val;
3330 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3331 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3332 {
3333 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3334 AssertRC(rc);
3335
3336 pCtx->rip = u64Val;
3337 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3338 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3339 }
3340}
3341
3342
3343/**
3344 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3345 *
3346 * @param pVCpu The cross context virtual CPU structure.
3347 * @param pVmcsInfo The VMCS info. object.
3348 *
3349 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3350 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3351 * instead!!!
3352 */
3353static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3354{
3355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3356 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3357 {
3358 uint64_t u64Val;
3359 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3360 AssertRC(rc);
3361
3362 pCtx->rflags.u64 = u64Val;
3363#ifndef IN_NEM_DARWIN
3364 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3365 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3366 {
3367 pCtx->eflags.Bits.u1VM = 0;
3368 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3369 }
3370#else
3371 RT_NOREF(pVmcsInfo);
3372#endif
3373 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3374 }
3375}
3376
3377
3378/**
3379 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3380 * context.
3381 *
3382 * @param pVCpu The cross context virtual CPU structure.
3383 * @param pVmcsInfo The VMCS info. object.
3384 *
3385 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3386 * do not log!
3387 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3388 * instead!!!
3389 */
3390static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3391{
3392 uint32_t u32Val;
3393 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3394 if (!u32Val)
3395 {
3396 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3397 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3398 CPUMSetGuestNmiBlocking(pVCpu, false);
3399 }
3400 else
3401 {
3402 /*
3403 * We must import RIP here to set our EM interrupt-inhibited state.
3404 * We also import RFLAGS as our code that evaluates pending interrupts
3405 * before VM-entry requires it.
3406 */
3407 vmxHCImportGuestRip(pVCpu);
3408 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3409
3410 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3411 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3412 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3413 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3414
3415 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3416 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3417 }
3418}
3419
3420
3421/**
3422 * Worker for VMXR0ImportStateOnDemand.
3423 *
3424 * @returns VBox status code.
3425 * @param pVCpu The cross context virtual CPU structure.
3426 * @param pVmcsInfo The VMCS info. object.
3427 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3428 */
3429static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3430{
3431 int rc = VINF_SUCCESS;
3432 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3433 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3434 uint32_t u32Val;
3435
3436 /*
3437     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3438 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3439 * neither are other host platforms.
3440 *
3441     * Committing this temporarily as it prevents the BSOD.
3442 *
3443 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3444 */
3445# ifdef RT_OS_WINDOWS
3446 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3447 return VERR_HM_IPE_1;
3448# endif
3449
3450 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3451
3452#ifndef IN_NEM_DARWIN
3453 /*
3454 * We disable interrupts to make the updating of the state and in particular
3455     * the fExtrn modification atomic with respect to preemption hooks.
3456 */
3457 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3458#endif
3459
3460 fWhat &= pCtx->fExtrn;
3461 if (fWhat)
3462 {
3463 do
3464 {
3465 if (fWhat & CPUMCTX_EXTRN_RIP)
3466 vmxHCImportGuestRip(pVCpu);
3467
3468 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3469 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3470
3471 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3472 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3473
3474 if (fWhat & CPUMCTX_EXTRN_RSP)
3475 {
3476 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3477 AssertRC(rc);
3478 }
3479
3480 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3481 {
3482 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3483#ifndef IN_NEM_DARWIN
3484 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3485#else
3486 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3487#endif
3488 if (fWhat & CPUMCTX_EXTRN_CS)
3489 {
3490 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3491 vmxHCImportGuestRip(pVCpu);
3492 if (fRealOnV86Active)
3493 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3494 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3495 }
3496 if (fWhat & CPUMCTX_EXTRN_SS)
3497 {
3498 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3499 if (fRealOnV86Active)
3500 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3501 }
3502 if (fWhat & CPUMCTX_EXTRN_DS)
3503 {
3504 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3505 if (fRealOnV86Active)
3506 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_ES)
3509 {
3510 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3511 if (fRealOnV86Active)
3512 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_FS)
3515 {
3516 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3517 if (fRealOnV86Active)
3518 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_GS)
3521 {
3522 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3523 if (fRealOnV86Active)
3524 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3525 }
3526 }
3527
3528 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3529 {
3530 if (fWhat & CPUMCTX_EXTRN_LDTR)
3531 vmxHCImportGuestLdtr(pVCpu);
3532
3533 if (fWhat & CPUMCTX_EXTRN_GDTR)
3534 {
3535 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3536 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3537 pCtx->gdtr.cbGdt = u32Val;
3538 }
3539
3540 /* Guest IDTR. */
3541 if (fWhat & CPUMCTX_EXTRN_IDTR)
3542 {
3543 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3544 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3545 pCtx->idtr.cbIdt = u32Val;
3546 }
3547
3548 /* Guest TR. */
3549 if (fWhat & CPUMCTX_EXTRN_TR)
3550 {
3551#ifndef IN_NEM_DARWIN
3552 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3553                       we don't need to import that one. */
3554 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3555#endif
3556 vmxHCImportGuestTr(pVCpu);
3557 }
3558 }
3559
3560 if (fWhat & CPUMCTX_EXTRN_DR7)
3561 {
3562#ifndef IN_NEM_DARWIN
3563 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3564#endif
3565 {
3566 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3567 AssertRC(rc);
3568 }
3569 }
3570
3571 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3572 {
3573 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3574 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3575 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3576 pCtx->SysEnter.cs = u32Val;
3577 }
3578
3579#ifndef IN_NEM_DARWIN
3580 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3581 {
3582 if ( pVM->hmr0.s.fAllow64BitGuests
3583 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3584 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3585 }
3586
3587 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3588 {
3589 if ( pVM->hmr0.s.fAllow64BitGuests
3590 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3591 {
3592 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3593 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3594 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3595 }
3596 }
3597
3598 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3599 {
3600 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3601 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3602 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3603 Assert(pMsrs);
3604 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3605 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3606 for (uint32_t i = 0; i < cMsrs; i++)
3607 {
3608 uint32_t const idMsr = pMsrs[i].u32Msr;
3609 switch (idMsr)
3610 {
3611 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3612 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3613 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3614 default:
3615 {
3616 uint32_t idxLbrMsr;
3617 if (VM_IS_VMX_LBR(pVM))
3618 {
3619 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3620 {
3621 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3622 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3623 break;
3624 }
3625 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3626 {
3627                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3628 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3629 break;
3630 }
3631 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3632 {
3633 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3634 break;
3635 }
3636 /* Fallthru (no break) */
3637 }
3638 pCtx->fExtrn = 0;
3639                            VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3640 ASMSetFlags(fEFlags);
3641 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3642 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3643 }
3644 }
3645 }
3646 }
3647#endif
3648
3649 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3650 {
3651 if (fWhat & CPUMCTX_EXTRN_CR0)
3652 {
3653 uint64_t u64Cr0;
3654 uint64_t u64Shadow;
3655 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3656 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3657#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3658 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3659 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3660#else
3661 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3662 {
3663 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3664 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3665 }
3666 else
3667 {
3668 /*
3669 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3670 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3671 * re-construct CR0. See @bugref{9180#c95} for details.
3672 */
3673 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3674 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3677 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3678 }
3679#endif
3680#ifndef IN_NEM_DARWIN
3681 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3682#endif
3683 CPUMSetGuestCR0(pVCpu, u64Cr0);
3684#ifndef IN_NEM_DARWIN
3685 VMMRZCallRing3Enable(pVCpu);
3686#endif
3687 }
3688
3689 if (fWhat & CPUMCTX_EXTRN_CR4)
3690 {
3691 uint64_t u64Cr4;
3692 uint64_t u64Shadow;
3693 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3694 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3695#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3696 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3697 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3698#else
3699 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3700 {
3701 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3702 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3703 }
3704 else
3705 {
3706 /*
3707 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3708 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3709 * re-construct CR4. See @bugref{9180#c95} for details.
3710 */
3711 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3712 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3715 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3716 }
3717#endif
3718 pCtx->cr4 = u64Cr4;
3719 }
3720
3721 if (fWhat & CPUMCTX_EXTRN_CR3)
3722 {
3723 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3724 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3725 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3726 && CPUMIsGuestPagingEnabledEx(pCtx)))
3727 {
3728 uint64_t u64Cr3;
3729 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3730 if (pCtx->cr3 != u64Cr3)
3731 {
3732 pCtx->cr3 = u64Cr3;
3733 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3734 }
3735
3736 /*
3737 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3738 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3739 */
3740 if (CPUMIsGuestInPAEModeEx(pCtx))
3741 {
3742 X86PDPE aPaePdpes[4];
3743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3744 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3746 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3747 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3748 {
3749 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3750 /* PGM now updates PAE PDPTEs while updating CR3. */
3751 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3752 }
3753 }
3754 }
3755 }
3756 }
3757
3758#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3759 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3760 {
3761 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3762 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3763 {
3764 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3765 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3766 if (RT_SUCCESS(rc))
3767 { /* likely */ }
3768 else
3769 break;
3770 }
3771 }
3772#endif
3773 } while (0);
3774
3775 if (RT_SUCCESS(rc))
3776 {
3777 /* Update fExtrn. */
3778 pCtx->fExtrn &= ~fWhat;
3779
3780 /* If everything has been imported, clear the HM keeper bit. */
3781 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3782 {
3783#ifndef IN_NEM_DARWIN
3784 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3785#else
3786 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3787#endif
3788 Assert(!pCtx->fExtrn);
3789 }
3790 }
3791 }
3792#ifndef IN_NEM_DARWIN
3793 else
3794 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3795
3796 /*
3797 * Restore interrupts.
3798 */
3799 ASMSetFlags(fEFlags);
3800#endif
3801
3802 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3803
3804 if (RT_SUCCESS(rc))
3805 { /* likely */ }
3806 else
3807 return rc;
3808
3809 /*
3810 * Honor any pending CR3 updates.
3811 *
3812 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3813 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3814 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3815 *
3816 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3817 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3818 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3819 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3820 *
3821 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3822 *
3823 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3824 */
3825 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3826#ifndef IN_NEM_DARWIN
3827 && VMMRZCallRing3IsEnabled(pVCpu)
3828#endif
3829 )
3830 {
3831 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3832 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3833 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3834 }
3835
3836 return VINF_SUCCESS;
3837}
3838
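/*
 * Illustrative sketch (not part of the original source): a VM-exit handler that only
 * needs RIP and RFLAGS imports just those bits before touching the guest context,
 * mirroring the callers further down in this file (pVmcsInfo being the current VMCS
 * info object):
 *
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     AssertRCReturn(rc, rc);
 *     Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
 */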
3839
3840/**
3841 * Check per-VM and per-VCPU force flag actions that require us to go back to
3842 * ring-3 for one reason or another.
3843 *
3844 * @returns Strict VBox status code (i.e. informational status codes too)
3845 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3846 * ring-3.
3847 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3848 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3849 * interrupts)
3850 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3851 * all EMTs to be in ring-3.
3852 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
3853 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3854 * to the EM loop.
3855 *
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
3858 * @param fStepping Whether we are single-stepping the guest using the
3859 * hypervisor debugger.
3860 *
3861 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3862 * is no longer in VMX non-root mode.
3863 */
3864static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3865{
3866#ifndef IN_NEM_DARWIN
3867 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3868#endif
3869
3870 /*
3871 * Update pending interrupts into the APIC's IRR.
3872 */
3873 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3874 APICUpdatePendingInterrupts(pVCpu);
3875
3876 /*
3877 * Anything pending? Should be more likely than not if we're doing a good job.
3878 */
3879 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3880 if ( !fStepping
3881 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3882 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3883 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3884 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3885 return VINF_SUCCESS;
3886
3887    /* Pending PGM CR3 sync. */
3888    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3889 {
3890 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3891 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3892 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3893 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3894 if (rcStrict != VINF_SUCCESS)
3895 {
3896 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3897 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3898 return rcStrict;
3899 }
3900 }
3901
3902 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3903 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3904 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3905 {
3906 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3907 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3908 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
3909 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
3910 return rc;
3911 }
3912
3913 /* Pending VM request packets, such as hardware interrupts. */
3914 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3915 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3916 {
3917 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3918 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3919 return VINF_EM_PENDING_REQUEST;
3920 }
3921
3922 /* Pending PGM pool flushes. */
3923 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3924 {
3925 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3926 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3927 return VINF_PGM_POOL_FLUSH_PENDING;
3928 }
3929
3930 /* Pending DMA requests. */
3931 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3932 {
3933 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3934 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3935 return VINF_EM_RAW_TO_R3;
3936 }
3937
3938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3939 /*
3940 * Pending nested-guest events.
3941 *
3942     * Please note the priority of these events is specified and important.
3943 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3944 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3945 */
3946 if (fIsNestedGuest)
3947 {
3948 /* Pending nested-guest APIC-write. */
3949 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3950 {
3951 Log4Func(("Pending nested-guest APIC-write\n"));
3952 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3953 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3954 return rcStrict;
3955 }
3956
3957 /* Pending nested-guest monitor-trap flag (MTF). */
3958 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3959 {
3960 Log4Func(("Pending nested-guest MTF\n"));
3961 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3962 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3963 return rcStrict;
3964 }
3965
3966 /* Pending nested-guest VMX-preemption timer expired. */
3967 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3968 {
3969 Log4Func(("Pending nested-guest preempt timer\n"));
3970 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3971 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3972 return rcStrict;
3973 }
3974 }
3975#else
3976 NOREF(fIsNestedGuest);
3977#endif
3978
3979 return VINF_SUCCESS;
3980}
3981
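/*
 * Illustrative sketch (not part of the original source): the pre-run code typically
 * calls the above right before committing to a VM-entry and bails out to ring-3 on
 * anything other than VINF_SUCCESS (false = not a nested guest, fStepping as passed
 * in by the caller):
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, false, fStepping);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */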
3982
3983/**
3984 * Converts any TRPM trap into a pending HM event. This is typically used when
3985 * entering from ring-3 (not longjmp returns).
3986 *
3987 * @param pVCpu The cross context virtual CPU structure.
3988 */
3989static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3990{
3991 Assert(TRPMHasTrap(pVCpu));
3992 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3993
3994 uint8_t uVector;
3995 TRPMEVENT enmTrpmEvent;
3996 uint32_t uErrCode;
3997 RTGCUINTPTR GCPtrFaultAddress;
3998 uint8_t cbInstr;
3999 bool fIcebp;
4000
4001 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4002 AssertRC(rc);
4003
4004 uint32_t u32IntInfo;
4005 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4006 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4007
4008 rc = TRPMResetTrap(pVCpu);
4009 AssertRC(rc);
4010 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4011 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4012
4013 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4014}
4015
4016
4017/**
4018 * Converts the pending HM event into a TRPM trap.
4019 *
4020 * @param pVCpu The cross context virtual CPU structure.
4021 */
4022static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4023{
4024 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4025
4026 /* If a trap was already pending, we did something wrong! */
4027 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4028
4029 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4030 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4031 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4032
4033 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4034
4035 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4036 AssertRC(rc);
4037
4038 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4039 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4040
4041 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4042 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4043 else
4044 {
4045 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4046 switch (uVectorType)
4047 {
4048 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4049 TRPMSetTrapDueToIcebp(pVCpu);
4050 RT_FALL_THRU();
4051 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4052 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4053 {
4054 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4055 || ( uVector == X86_XCPT_BP /* INT3 */
4056 || uVector == X86_XCPT_OF /* INTO */
4057 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4058 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4059 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4060 break;
4061 }
4062 }
4063 }
4064
4065 /* We're now done converting the pending event. */
4066 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4067}
4068
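/*
 * Illustrative sketch (not part of the original source): the two conversion helpers
 * above are used as a pair around guest execution, roughly:
 *
 *     if (TRPMHasTrap(pVCpu))                        <- entering from ring-3
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);
 *     ...
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)     <- leaving for ring-3 with an undelivered event
 *         vmxHCPendingEventToTrpmTrap(pVCpu);
 */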
4069
4070/**
4071 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4072 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4073 *
4074 * @param pVCpu The cross context virtual CPU structure.
4075 * @param pVmcsInfo The VMCS info. object.
4076 */
4077static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4078{
4079 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4080 {
4081 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4082 {
4083 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4084 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4085 AssertRC(rc);
4086 }
4087    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4088}
4089
4090
4091/**
4092 * Clears the interrupt-window exiting control in the VMCS.
4093 *
4094 * @param pVCpu The cross context virtual CPU structure.
4095 * @param pVmcsInfo The VMCS info. object.
4096 */
4097DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4098{
4099 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4100 {
4101 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4102 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4103 AssertRC(rc);
4104 }
4105}
4106
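/*
 * Illustrative sketch (not part of the original source): when an interrupt is pending
 * but the guest cannot take it yet (e.g. RFLAGS.IF is clear), the injection code asks
 * VT-x for an interrupt-window VM-exit and clears the request once the window opens:
 *
 *     vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);     guest not ready, request the VM-exit
 *     ...                                              the interrupt-window VM-exit fires later
 *     vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);   window open, stop the extra VM-exits
 */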
4107
4108/**
4109 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4110 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4111 *
4112 * @param pVCpu The cross context virtual CPU structure.
4113 * @param pVmcsInfo The VMCS info. object.
4114 */
4115static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4116{
4117 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4118 {
4119 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4120 {
4121 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4122 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4123 AssertRC(rc);
4124 Log4Func(("Setup NMI-window exiting\n"));
4125 }
4126 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4127}
4128
4129
4130/**
4131 * Clears the NMI-window exiting control in the VMCS.
4132 *
4133 * @param pVCpu The cross context virtual CPU structure.
4134 * @param pVmcsInfo The VMCS info. object.
4135 */
4136DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4137{
4138 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4139 {
4140 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4141 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4142 AssertRC(rc);
4143 }
4144}
4145
4146
4147/**
4148 * Injects an event into the guest upon VM-entry by updating the relevant fields
4149 * in the VM-entry area in the VMCS.
4150 *
4151 * @returns Strict VBox status code (i.e. informational status codes too).
4152 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4153 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4154 *
4155 * @param pVCpu The cross context virtual CPU structure.
4156 * @param pVmcsInfo The VMCS info object.
4157 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4158 * @param pEvent The event being injected.
4159 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4160 *                          will be updated if necessary. This cannot be NULL.
4161 * @param fStepping Whether we're single-stepping guest execution and should
4162 * return VINF_EM_DBG_STEPPED if the event is injected
4163 * directly (registers modified by us, not by hardware on
4164 * VM-entry).
4165 */
4166static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4167 bool fStepping, uint32_t *pfIntrState)
4168{
4169 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4170 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4171 Assert(pfIntrState);
4172
4173#ifdef IN_NEM_DARWIN
4174 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4175#endif
4176
4177 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4178 uint32_t u32IntInfo = pEvent->u64IntInfo;
4179 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4180 uint32_t const cbInstr = pEvent->cbInstr;
4181 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4182 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4183 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4184
4185#ifdef VBOX_STRICT
4186 /*
4187 * Validate the error-code-valid bit for hardware exceptions.
4188 * No error codes for exceptions in real-mode.
4189 *
4190 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4191 */
4192 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4193 && !CPUMIsGuestInRealModeEx(pCtx))
4194 {
4195 switch (uVector)
4196 {
4197 case X86_XCPT_PF:
4198 case X86_XCPT_DF:
4199 case X86_XCPT_TS:
4200 case X86_XCPT_NP:
4201 case X86_XCPT_SS:
4202 case X86_XCPT_GP:
4203 case X86_XCPT_AC:
4204 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4205 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4206 RT_FALL_THRU();
4207 default:
4208 break;
4209 }
4210 }
4211
4212 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4213 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4214 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4215#endif
4216
4217 RT_NOREF(uVector);
4218 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4219 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4220 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4221 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4222 {
4223 Assert(uVector <= X86_XCPT_LAST);
4224 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4225 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4226 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4227 }
4228 else
4229 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4230
4231 /*
4232 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4233 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4234 * interrupt handler in the (real-mode) guest.
4235 *
4236 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4237 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4238 */
4239 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4240 {
4241#ifndef IN_NEM_DARWIN
4242 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4243#endif
4244 {
4245 /*
4246 * For CPUs with unrestricted guest execution enabled and with the guest
4247 * in real-mode, we must not set the deliver-error-code bit.
4248 *
4249 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4250 */
4251 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4252 }
4253#ifndef IN_NEM_DARWIN
4254 else
4255 {
4256 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4257 Assert(PDMVmmDevHeapIsEnabled(pVM));
4258 Assert(pVM->hm.s.vmx.pRealModeTSS);
4259 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4260
4261 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4262 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4263 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4264 AssertRCReturn(rc2, rc2);
4265
4266 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4267 size_t const cbIdtEntry = sizeof(X86IDTR16);
4268 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4269 {
4270 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4271 if (uVector == X86_XCPT_DF)
4272 return VINF_EM_RESET;
4273
4274 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4275 No error codes for exceptions in real-mode. */
4276 if (uVector == X86_XCPT_GP)
4277 {
4278 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4279 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4280 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4281 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4282 HMEVENT EventXcptDf;
4283 RT_ZERO(EventXcptDf);
4284 EventXcptDf.u64IntInfo = uXcptDfInfo;
4285 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4286 }
4287
4288 /*
4289 * If we're injecting an event with no valid IDT entry, inject a #GP.
4290 * No error codes for exceptions in real-mode.
4291 *
4292 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4293 */
4294 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4295 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4296 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4297 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4298 HMEVENT EventXcptGp;
4299 RT_ZERO(EventXcptGp);
4300 EventXcptGp.u64IntInfo = uXcptGpInfo;
4301 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4302 }
4303
4304 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4305 uint16_t uGuestIp = pCtx->ip;
4306 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4307 {
4308 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4309                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4310 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4311 }
4312 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4313 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4314
4315 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4316 X86IDTR16 IdtEntry;
4317 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4318 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4319 AssertRCReturn(rc2, rc2);
4320
4321 /* Construct the stack frame for the interrupt/exception handler. */
4322 VBOXSTRICTRC rcStrict;
4323 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4324 if (rcStrict == VINF_SUCCESS)
4325 {
4326 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4327 if (rcStrict == VINF_SUCCESS)
4328 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4329 }
4330
4331 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4332 if (rcStrict == VINF_SUCCESS)
4333 {
4334 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4335 pCtx->rip = IdtEntry.offSel;
4336 pCtx->cs.Sel = IdtEntry.uSel;
4337 pCtx->cs.ValidSel = IdtEntry.uSel;
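                /* Note: cbIdtEntry is sizeof(X86IDTR16) == 4, which happens to equal the
                   shift count needed to form a real-mode segment base (selector << 4). */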
4338 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4339 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4340 && uVector == X86_XCPT_PF)
4341 pCtx->cr2 = GCPtrFault;
4342
4343 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4344 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4345 | HM_CHANGED_GUEST_RSP);
4346
4347 /*
4348 * If we delivered a hardware exception (other than an NMI) and if there was
4349 * block-by-STI in effect, we should clear it.
4350 */
4351 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4352 {
4353 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4354 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4355 Log4Func(("Clearing inhibition due to STI\n"));
4356 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4357 }
4358
4359 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4360 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4361
4362 /*
4363 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4364 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4365 */
4366 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4367
4368 /*
4369 * If we eventually support nested-guest execution without unrestricted guest execution,
4370 * we should set fInterceptEvents here.
4371 */
4372 Assert(!fIsNestedGuest);
4373
4374 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4375 if (fStepping)
4376 rcStrict = VINF_EM_DBG_STEPPED;
4377 }
4378 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4379 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4380 return rcStrict;
4381 }
4382#else
4383 RT_NOREF(pVmcsInfo);
4384#endif
4385 }
4386
4387 /*
4388 * Validate.
4389 */
4390 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4391 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4392
4393 /*
4394 * Inject the event into the VMCS.
4395 */
4396 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4397 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4398 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4399 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4400 AssertRC(rc);
4401
4402 /*
4403 * Update guest CR2 if this is a page-fault.
4404 */
4405 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4406 pCtx->cr2 = GCPtrFault;
4407
4408 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4409 return VINF_SUCCESS;
4410}
4411
4412
4413/**
4414 * Evaluates the event to be delivered to the guest and sets it as the pending
4415 * event.
4416 *
4417 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4418 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4419 * NOT restore these force-flags.
4420 *
4421 * @returns Strict VBox status code (i.e. informational status codes too).
4422 * @param pVCpu The cross context virtual CPU structure.
4423 * @param pVmcsInfo The VMCS information structure.
4424 * @param   fIsNestedGuest  Flag whether the evaluation happens for a nested guest.
4425 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4426 */
4427static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4428{
4429 Assert(pfIntrState);
4430 Assert(!TRPMHasTrap(pVCpu));
4431
4432 /*
4433 * Compute/update guest-interruptibility state related FFs.
4434 * The FFs will be used below while evaluating events to be injected.
4435 */
4436 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4437
4438 /*
4439 * Evaluate if a new event needs to be injected.
4440 * An event that's already pending has already performed all necessary checks.
4441 */
4442 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4443 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4444 {
4445 /** @todo SMI. SMIs take priority over NMIs. */
4446
4447 /*
4448 * NMIs.
4449 * NMIs take priority over external interrupts.
4450 */
4451#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4452 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4453#endif
4454 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4455 {
4456 /*
4457 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4458 *
4459 * For a nested-guest, the FF always indicates the outer guest's ability to
4460 * receive an NMI while the guest-interruptibility state bit depends on whether
4461 * the nested-hypervisor is using virtual-NMIs.
4462 */
4463 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4464 {
4465#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4466 if ( fIsNestedGuest
4467 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4468 return IEMExecVmxVmexitXcptNmi(pVCpu);
4469#endif
4470 vmxHCSetPendingXcptNmi(pVCpu);
4471 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4472 Log4Func(("NMI pending injection\n"));
4473
4474 /* We've injected the NMI, bail. */
4475 return VINF_SUCCESS;
4476 }
4477 else if (!fIsNestedGuest)
4478 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4479 }
4480
4481 /*
4482 * External interrupts (PIC/APIC).
4483 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4484 * We cannot re-request the interrupt from the controller.
4485 */
4486 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4487 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4488 {
4489 Assert(!DBGFIsStepping(pVCpu));
4490 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4491 AssertRC(rc);
4492
4493 /*
4494 * We must not check EFLAGS directly when executing a nested-guest, use
4495 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4496 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4497 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4498 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4499 *
4500 * See Intel spec. 25.4.1 "Event Blocking".
4501 */
4502 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4503 {
4504#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4505 if ( fIsNestedGuest
4506 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4507 {
4508 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4509 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4510 return rcStrict;
4511 }
4512#endif
4513 uint8_t u8Interrupt;
4514 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4515 if (RT_SUCCESS(rc))
4516 {
4517#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4518 if ( fIsNestedGuest
4519 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4520 {
4521 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4522 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4523 return rcStrict;
4524 }
4525#endif
4526 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4527 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4528 }
4529 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4530 {
4531 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4532
4533 if ( !fIsNestedGuest
4534 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4535 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4536 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
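                    /* Note: u8Interrupt >> 4 is the masked interrupt's priority class; the TPR threshold
                       is compared against bits 7:4 of the virtual TPR, so using the priority class here
                       causes a TPR-below-threshold VM-exit once the guest lowers its TPR far enough to
                       accept the interrupt. */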
4537
4538 /*
4539 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4540 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4541 * need to re-set this force-flag here.
4542 */
4543 }
4544 else
4545 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4546
4547 /* We've injected the interrupt or taken necessary action, bail. */
4548 return VINF_SUCCESS;
4549 }
4550 if (!fIsNestedGuest)
4551 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4552 }
4553 }
4554 else if (!fIsNestedGuest)
4555 {
4556 /*
4557 * An event is being injected or we are in an interrupt shadow. Check if another event is
4558 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4559 * the pending event.
4560 */
4561 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4562 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4563 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4564 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4565 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4566 }
4567 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4568
4569 return VINF_SUCCESS;
4570}
4571
4572
4573/**
4574 * Injects any pending events into the guest if the guest is in a state to
4575 * receive them.
4576 *
4577 * @returns Strict VBox status code (i.e. informational status codes too).
4578 * @param pVCpu The cross context virtual CPU structure.
4579 * @param pVmcsInfo The VMCS information structure.
4580 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4581 * @param fIntrState The VT-x guest-interruptibility state.
4582 * @param fStepping Whether we are single-stepping the guest using the
4583 * hypervisor debugger and should return
4584 * VINF_EM_DBG_STEPPED if the event was dispatched
4585 * directly.
4586 */
4587static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4588 uint32_t fIntrState, bool fStepping)
4589{
4590 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4591#ifndef IN_NEM_DARWIN
4592 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4593#endif
4594
4595#ifdef VBOX_STRICT
4596 /*
4597 * Verify guest-interruptibility state.
4598 *
4599 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4600 * since injecting an event may modify the interruptibility state and we must thus always
4601 * use fIntrState.
4602 */
4603 {
4604 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4605 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4606 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4607 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4608 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
4609 Assert(!TRPMHasTrap(pVCpu));
4610 NOREF(fBlockMovSS); NOREF(fBlockSti);
4611 }
4612#endif
4613
4614 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4615 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4616 {
4617 /*
4618 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4619 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4620 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4621 *
4622 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4623 */
4624 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4625#ifdef VBOX_STRICT
4626 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4627 {
4628 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4629 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4630 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4631 }
4632 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4633 {
4634 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4635 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4636 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4637 }
4638#endif
4639 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4640 uIntType));
4641
4642 /*
4643 * Inject the event and get any changes to the guest-interruptibility state.
4644 *
4645 * The guest-interruptibility state may need to be updated if we inject the event
4646 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4647 */
4648 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4649 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4650
4651 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4652 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4653 else
4654 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4655 }
4656
4657 /*
4658 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4659 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4660 */
4661 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4662 && !fIsNestedGuest)
4663 {
4664 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4665
4666 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4667 {
4668 /*
4669 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4670 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4671 */
4672 Assert(!DBGFIsStepping(pVCpu));
4673 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4674 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4675 AssertRC(rc);
4676 }
4677 else
4678 {
4679 /*
4680 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4681 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4682 * we take care of this case in vmxHCExportSharedDebugState and also the case where
4683 * we use MTF, so just make sure it's called before executing guest-code.
4684 */
4685 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4686 }
4687 }
4688 /* else: for nested-guests this is currently handled while merging controls. */
4689
4690 /*
4691 * Finally, update the guest-interruptibility state.
4692 *
4693 * This is required for the real-on-v86 software interrupt injection, for
4694 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4695 */
4696 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4697 AssertRC(rc);
4698
4699 /*
4700 * There's no need to clear the VM-entry interruption-information field here if we're not
4701 * injecting anything. VT-x clears the valid bit on every VM-exit.
4702 *
4703 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4704 */
4705
4706 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4707 return rcStrict;
4708}
4709
4710
4711/**
4712 * Tries to determine what part of the guest-state VT-x has deemed invalid
4713 * and updates the error record fields accordingly.
4714 *
4715 * @returns VMX_IGS_* error codes.
4716 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4717 * wrong with the guest state.
4718 *
4719 * @param pVCpu The cross context virtual CPU structure.
4720 * @param pVmcsInfo The VMCS info. object.
4721 *
4722 * @remarks This function assumes our cache of the VMCS controls
4723 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4724 */
4725static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4726{
4727#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4728#define HMVMX_CHECK_BREAK(expr, err) do { \
4729 if (!(expr)) { uError = (err); break; } \
4730 } while (0)
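/* Note: on the first check that fails, these macros record the error code in uError and break out
   of the do { ... } while (0) block below, so only the first failure is reported. */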
4731
4732 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4733 uint32_t uError = VMX_IGS_ERROR;
4734 uint32_t u32IntrState = 0;
4735#ifndef IN_NEM_DARWIN
4736 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4737 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4738#else
4739 bool const fUnrestrictedGuest = true;
4740#endif
4741 do
4742 {
4743 int rc;
4744
4745 /*
4746 * Guest-interruptibility state.
4747 *
4748 * Read this first so that even if a check that does not require the
4749 * guest-interruptibility state fails early, the error record below still
4750 * reflects the correct VMCS value and avoids causing further confusion.
4751 */
4752 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4753 AssertRC(rc);
4754
4755 uint32_t u32Val;
4756 uint64_t u64Val;
4757
4758 /*
4759 * CR0.
4760 */
4761 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4762 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4763 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
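        /* Here fSetCr0 is the mask of CR0 bits the CPU requires to be 1 (bits set in both the
           fixed-0 and fixed-1 MSRs) and fZapCr0 is the mask of bits allowed to be 1 (bits set in
           either MSR); in practice these reduce to the fixed-0 and fixed-1 values themselves. */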
4764 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4765 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4766 if (fUnrestrictedGuest)
4767 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4768
4769 uint64_t u64GuestCr0;
4770 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4771 AssertRC(rc);
4772 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4773 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4774 if ( !fUnrestrictedGuest
4775 && (u64GuestCr0 & X86_CR0_PG)
4776 && !(u64GuestCr0 & X86_CR0_PE))
4777 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4778
4779 /*
4780 * CR4.
4781 */
4782 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4783 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4784 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4785
4786 uint64_t u64GuestCr4;
4787 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4788 AssertRC(rc);
4789 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4790 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4791
4792 /*
4793 * IA32_DEBUGCTL MSR.
4794 */
4795 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4796 AssertRC(rc);
4797 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4798 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4799 {
4800 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4801 }
4802 uint64_t u64DebugCtlMsr = u64Val;
4803
4804#ifdef VBOX_STRICT
4805 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4806 AssertRC(rc);
4807 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4808#endif
4809 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4810
4811 /*
4812 * RIP and RFLAGS.
4813 */
4814 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4815 AssertRC(rc);
4816 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and VM-exits that don't update it). */
4817 if ( !fLongModeGuest
4818 || !pCtx->cs.Attr.n.u1Long)
4819 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4820 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4821 * must be identical if the "IA-32e mode guest" VM-entry
4822 * control is 1 and CS.L is 1. No check applies if the
4823 * CPU supports 64 linear-address bits. */
4824
4825 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4826 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4827 AssertRC(rc);
4828 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4829 VMX_IGS_RFLAGS_RESERVED);
4830 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4831 uint32_t const u32Eflags = u64Val;
4832
4833 if ( fLongModeGuest
4834 || ( fUnrestrictedGuest
4835 && !(u64GuestCr0 & X86_CR0_PE)))
4836 {
4837 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4838 }
4839
4840 uint32_t u32EntryInfo;
4841 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4842 AssertRC(rc);
4843 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4844 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4845
4846 /*
4847 * 64-bit checks.
4848 */
4849 if (fLongModeGuest)
4850 {
4851 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4852 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4853 }
4854
4855 if ( !fLongModeGuest
4856 && (u64GuestCr4 & X86_CR4_PCIDE))
4857 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4858
4859 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4860 * 51:32 beyond the processor's physical-address width are 0. */
4861
4862 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4863 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4864 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4865
4866#ifndef IN_NEM_DARWIN
4867 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4868 AssertRC(rc);
4869 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4870
4871 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4872 AssertRC(rc);
4873 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4874#endif
4875
4876 /*
4877 * PERF_GLOBAL MSR.
4878 */
4879 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4880 {
4881 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4882 AssertRC(rc);
4883 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4884 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4885 }
4886
4887 /*
4888 * PAT MSR.
4889 */
4890 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4891 {
4892 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4893 AssertRC(rc);
4894 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry are reserved. */
4895 for (unsigned i = 0; i < 8; i++)
4896 {
4897 uint8_t u8Val = (u64Val & 0xff);
4898 if ( u8Val != 0 /* UC */
4899 && u8Val != 1 /* WC */
4900 && u8Val != 4 /* WT */
4901 && u8Val != 5 /* WP */
4902 && u8Val != 6 /* WB */
4903 && u8Val != 7 /* UC- */)
4904 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4905 u64Val >>= 8;
4906 }
4907 }
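        /* For reference: the architectural power-up PAT value 0x0007040600070406 decodes to the
           entries WB, WT, UC-, UC (repeated), all of which are in the allowed set checked above. */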
4908
4909 /*
4910 * EFER MSR.
4911 */
4912 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4913 {
4914 Assert(g_fHmVmxSupportsVmcsEfer);
4915 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4916 AssertRC(rc);
4917 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4918 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4919 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4920 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4921 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4922 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4923 * iemVmxVmentryCheckGuestState(). */
4924 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4925 || !(u64GuestCr0 & X86_CR0_PG)
4926 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4927 VMX_IGS_EFER_LMA_LME_MISMATCH);
4928 }
4929
4930 /*
4931 * Segment registers.
4932 */
4933 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4934 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4935 if (!(u32Eflags & X86_EFL_VM))
4936 {
4937 /* CS */
4938 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4939 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4940 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4941 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4942 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4943 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4944 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4945 /* CS cannot be loaded with NULL in protected mode. */
4946 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4947 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4948 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4949 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4950 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4951 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4952 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4953 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4954 else
4955 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4956
4957 /* SS */
4958 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4959 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4960 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4961 if ( !(pCtx->cr0 & X86_CR0_PE)
4962 || pCtx->cs.Attr.n.u4Type == 3)
4963 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4964
4965 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4966 {
4967 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4968 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4969 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4970 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4971 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4972 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4973 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4974 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4975 }
4976
4977 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4978 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4979 {
4980 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4981 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4982 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4983 || pCtx->ds.Attr.n.u4Type > 11
4984 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4985 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4986 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4987 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4988 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4989 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4990 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4991 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4992 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4993 }
4994 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4995 {
4996 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4997 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4998 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4999 || pCtx->es.Attr.n.u4Type > 11
5000 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5001 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5002 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5003 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5004 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5005 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5006 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5007 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5008 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5009 }
5010 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5011 {
5012 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5013 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5014 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5015 || pCtx->fs.Attr.n.u4Type > 11
5016 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5017 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5018 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5019 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5020 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5021 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5022 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5023 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5024 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5025 }
5026 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5027 {
5028 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5029 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5030 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5031 || pCtx->gs.Attr.n.u4Type > 11
5032 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5033 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5034 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5035 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5036 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5037 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5038 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5039 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5040 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5041 }
5042 /* 64-bit capable CPUs. */
5043 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5044 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5045 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5046 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5047 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5048 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5049 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5050 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5051 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5052 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5053 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5054 }
5055 else
5056 {
5057 /* V86 mode checks. */
5058 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5059 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5060 {
5061 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5062 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5063 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5064 }
5065 else
5066 {
5067 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5068 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5069 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5070 }
5071
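            /* In virtual-8086 mode, VM-entry requires for each segment register: base = 16 * selector,
               limit = 0xffff and access rights = 0xf3 (present, DPL 3, accessed read/write data).
               See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */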
5072 /* CS */
5073 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5074 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5075 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5076 /* SS */
5077 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5078 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5079 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5080 /* DS */
5081 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5082 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5083 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5084 /* ES */
5085 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5086 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5087 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5088 /* FS */
5089 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5090 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5091 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5092 /* GS */
5093 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5094 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5095 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5096 /* 64-bit capable CPUs. */
5097 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5098 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5099 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5100 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5101 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5102 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5103 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5104 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5105 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5106 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5107 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5108 }
5109
5110 /*
5111 * TR.
5112 */
5113 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5114 /* 64-bit capable CPUs. */
5115 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5116 if (fLongModeGuest)
5117 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5118 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5119 else
5120 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5121 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS. */
5122 VMX_IGS_TR_ATTR_TYPE_INVALID);
5123 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5124 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5125 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5126 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5127 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5128 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5129 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5130 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5131
5132 /*
5133 * GDTR and IDTR (64-bit capable checks).
5134 */
5135 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5136 AssertRC(rc);
5137 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5138
5139 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5140 AssertRC(rc);
5141 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5142
5143 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5144 AssertRC(rc);
5145 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5146
5147 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5148 AssertRC(rc);
5149 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5150
5151 /*
5152 * Guest Non-Register State.
5153 */
5154 /* Activity State. */
5155 uint32_t u32ActivityState;
5156 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5157 AssertRC(rc);
5158 HMVMX_CHECK_BREAK( !u32ActivityState
5159 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5160 VMX_IGS_ACTIVITY_STATE_INVALID);
5161 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5162 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5163
5164 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5165 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5166 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5167
5168 /** @todo Activity state and injecting interrupts. Left as a todo since we
5169 * currently don't use activity states but ACTIVE. */
5170
5171 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5172 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5173
5174 /* Guest interruptibility-state. */
5175 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5176 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5177 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5178 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5179 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5180 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5181 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5182 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5183 {
5184 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5185 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5186 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5187 }
5188 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5189 {
5190 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5191 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5192 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5193 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5194 }
5195 /** @todo Assumes the processor is not in SMM. */
5196 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5197 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5198 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5199 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5200 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5201 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5202 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5203 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5204
5205 /* Pending debug exceptions. */
5206 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5207 AssertRC(rc);
5208 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5209 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5210 u32Val = u64Val; /* For pending debug exceptions checks below. */
5211
5212 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5213 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5214 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5215 {
5216 if ( (u32Eflags & X86_EFL_TF)
5217 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5218 {
5219 /* Bit 14 is PendingDebug.BS. */
5220 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5221 }
5222 if ( !(u32Eflags & X86_EFL_TF)
5223 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5224 {
5225 /* Bit 14 is PendingDebug.BS. */
5226 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5227 }
5228 }
5229
5230#ifndef IN_NEM_DARWIN
5231 /* VMCS link pointer. */
5232 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5233 AssertRC(rc);
5234 if (u64Val != UINT64_C(0xffffffffffffffff))
5235 {
5236 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5237 /** @todo Bits beyond the processor's physical-address width MBZ. */
5238 /** @todo SMM checks. */
5239 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5240 Assert(pVmcsInfo->pvShadowVmcs);
5241 VMXVMCSREVID VmcsRevId;
5242 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5243 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5244 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5245 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5246 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5247 }
5248
5249 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5250 * not using nested paging? */
5251 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5252 && !fLongModeGuest
5253 && CPUMIsGuestInPAEModeEx(pCtx))
5254 {
5255 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5256 AssertRC(rc);
5257 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5258
5259 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5260 AssertRC(rc);
5261 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5262
5263 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5264 AssertRC(rc);
5265 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5266
5267 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5268 AssertRC(rc);
5269 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5270 }
5271#endif
5272
5273 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5274 if (uError == VMX_IGS_ERROR)
5275 uError = VMX_IGS_REASON_NOT_FOUND;
5276 } while (0);
5277
5278 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5279 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5280 return uError;
5281
5282#undef HMVMX_ERROR_BREAK
5283#undef HMVMX_CHECK_BREAK
5284}
5285
5286
5287#ifndef HMVMX_USE_FUNCTION_TABLE
5288/**
5289 * Handles a guest VM-exit from hardware-assisted VMX execution.
5290 *
5291 * @returns Strict VBox status code (i.e. informational status codes too).
5292 * @param pVCpu The cross context virtual CPU structure.
5293 * @param pVmxTransient The VMX-transient structure.
5294 */
5295DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5296{
5297#ifdef DEBUG_ramshankar
5298# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5299 do { \
5300 if (a_fSave != 0) \
5301 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5302 VBOXSTRICTRC rcStrict = a_CallExpr; \
5303 if (a_fSave != 0) \
5304 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5305 return rcStrict; \
5306 } while (0)
5307#else
5308# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5309#endif
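    /* Note: when a_fSave is non-zero, the DEBUG_ramshankar variant of the macro above imports the
       entire guest state before the handler and marks it all as changed afterwards, which helps
       catch exit handlers that import too little state. */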
5310 uint32_t const uExitReason = pVmxTransient->uExitReason;
5311 switch (uExitReason)
5312 {
5313 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5314 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5315 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5316 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5317 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5318 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5319 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5320 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5321 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5322 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5323 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5324 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5325 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5326 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5327 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5328 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5329 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5330 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5331 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5332 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5333 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5334 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5335 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5336 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5337 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5338 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5339 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5340 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5341 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5342 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5343#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5344 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5345 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5346 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5347 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5348 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5349 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5350 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5351 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5352 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5353 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5354#else
5355 case VMX_EXIT_VMCLEAR:
5356 case VMX_EXIT_VMLAUNCH:
5357 case VMX_EXIT_VMPTRLD:
5358 case VMX_EXIT_VMPTRST:
5359 case VMX_EXIT_VMREAD:
5360 case VMX_EXIT_VMRESUME:
5361 case VMX_EXIT_VMWRITE:
5362 case VMX_EXIT_VMXOFF:
5363 case VMX_EXIT_VMXON:
5364 case VMX_EXIT_INVVPID:
5365 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5366#endif
5367#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5368 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5369#else
5370 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5371#endif
5372
5373 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5374 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5375 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5376
5377 case VMX_EXIT_INIT_SIGNAL:
5378 case VMX_EXIT_SIPI:
5379 case VMX_EXIT_IO_SMI:
5380 case VMX_EXIT_SMI:
5381 case VMX_EXIT_ERR_MSR_LOAD:
5382 case VMX_EXIT_ERR_MACHINE_CHECK:
5383 case VMX_EXIT_PML_FULL:
5384 case VMX_EXIT_VIRTUALIZED_EOI:
5385 case VMX_EXIT_GDTR_IDTR_ACCESS:
5386 case VMX_EXIT_LDTR_TR_ACCESS:
5387 case VMX_EXIT_APIC_WRITE:
5388 case VMX_EXIT_RDRAND:
5389 case VMX_EXIT_RSM:
5390 case VMX_EXIT_VMFUNC:
5391 case VMX_EXIT_ENCLS:
5392 case VMX_EXIT_RDSEED:
5393 case VMX_EXIT_XSAVES:
5394 case VMX_EXIT_XRSTORS:
5395 case VMX_EXIT_UMWAIT:
5396 case VMX_EXIT_TPAUSE:
5397 case VMX_EXIT_LOADIWKEY:
5398 default:
5399 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5400 }
5401#undef VMEXIT_CALL_RET
5402}
5403#endif /* !HMVMX_USE_FUNCTION_TABLE */
5404
5405
5406#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5407/**
5408 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5409 *
5410 * @returns Strict VBox status code (i.e. informational status codes too).
5411 * @param pVCpu The cross context virtual CPU structure.
5412 * @param pVmxTransient The VMX-transient structure.
5413 */
5414DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5415{
5416 uint32_t const uExitReason = pVmxTransient->uExitReason;
5417 switch (uExitReason)
5418 {
5419# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5420 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5421 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5422# else
5423 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5424 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5425# endif
5426 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5427 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5428 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5429
5430 /*
5431 * We shouldn't direct host physical interrupts to the nested-guest.
5432 */
5433 case VMX_EXIT_EXT_INT:
5434 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5435
5436 /*
5437 * Instructions that cause VM-exits unconditionally or whose VM-exit condition
5438 * is determined solely by the nested hypervisor (meaning that if the VM-exit
5439 * happens, it's guaranteed to be a nested-guest VM-exit).
5440 *
5441 * - Provides VM-exit instruction length ONLY.
5442 */
5443 case VMX_EXIT_CPUID: /* Unconditional. */
5444 case VMX_EXIT_VMCALL:
5445 case VMX_EXIT_GETSEC:
5446 case VMX_EXIT_INVD:
5447 case VMX_EXIT_XSETBV:
5448 case VMX_EXIT_VMLAUNCH:
5449 case VMX_EXIT_VMRESUME:
5450 case VMX_EXIT_VMXOFF:
5451 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5452 case VMX_EXIT_VMFUNC:
5453 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5454
5455 /*
5456 * Instructions that cause VM-exits unconditionally or whose VM-exit condition
5457 * is determined solely by the nested hypervisor (meaning that if the VM-exit
5458 * happens, it's guaranteed to be a nested-guest VM-exit).
5459 *
5460 * - Provides VM-exit instruction length.
5461 * - Provides VM-exit information.
5462 * - Optionally provides Exit qualification.
5463 *
5464 * Since Exit qualification is 0 for all VM-exits where it is not
5465 * applicable, reading and passing it to the guest should produce
5466 * defined behavior.
5467 *
5468 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5469 */
5470 case VMX_EXIT_INVEPT: /* Unconditional. */
5471 case VMX_EXIT_INVVPID:
5472 case VMX_EXIT_VMCLEAR:
5473 case VMX_EXIT_VMPTRLD:
5474 case VMX_EXIT_VMPTRST:
5475 case VMX_EXIT_VMXON:
5476 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5477 case VMX_EXIT_LDTR_TR_ACCESS:
5478 case VMX_EXIT_RDRAND:
5479 case VMX_EXIT_RDSEED:
5480 case VMX_EXIT_XSAVES:
5481 case VMX_EXIT_XRSTORS:
5482 case VMX_EXIT_UMWAIT:
5483 case VMX_EXIT_TPAUSE:
5484 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5485
5486 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5492 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5494 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5495 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5496 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5497 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5498 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5499 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5500 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5501 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5502 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5503 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5504 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5505
5506 case VMX_EXIT_PREEMPT_TIMER:
5507 {
5508 /** @todo NSTVMX: Preempt timer. */
5509 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5510 }
5511
5512 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5513 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5514
5515 case VMX_EXIT_VMREAD:
5516 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5517
5518 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5519 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5520
5521 case VMX_EXIT_INIT_SIGNAL:
5522 case VMX_EXIT_SIPI:
5523 case VMX_EXIT_IO_SMI:
5524 case VMX_EXIT_SMI:
5525 case VMX_EXIT_ERR_MSR_LOAD:
5526 case VMX_EXIT_ERR_MACHINE_CHECK:
5527 case VMX_EXIT_PML_FULL:
5528 case VMX_EXIT_RSM:
5529 default:
5530 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5531 }
5532}
5533#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5534
5535
5536/** @name VM-exit helpers.
5537 * @{
5538 */
5539/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5540/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5541/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5542
5543/** Macro for VM-exits called unexpectedly. */
5544#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5545 do { \
5546 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5547 return VERR_VMX_UNEXPECTED_EXIT; \
5548 } while (0)
5549
5550#ifdef VBOX_STRICT
5551# ifndef IN_NEM_DARWIN
5552/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5553# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5554 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5555
5556# define HMVMX_ASSERT_PREEMPT_CPUID() \
5557 do { \
5558 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5559 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5560 } while (0)
5561
5562# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5563 do { \
5564 AssertPtr((a_pVCpu)); \
5565 AssertPtr((a_pVmxTransient)); \
5566 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5567 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5568 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5569 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5570 Assert((a_pVmxTransient)->pVmcsInfo); \
5571 Assert(ASMIntAreEnabled()); \
5572 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5573 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5574 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5575 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5576 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5577 HMVMX_ASSERT_PREEMPT_CPUID(); \
5578 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5579 } while (0)
5580# else
5581# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5582# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5583# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5584 do { \
5585 AssertPtr((a_pVCpu)); \
5586 AssertPtr((a_pVmxTransient)); \
5587 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5588 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5589 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5590 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5591 Assert((a_pVmxTransient)->pVmcsInfo); \
5592 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5593 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5594 } while (0)
5595# endif
5596
5597# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5598 do { \
5599 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5600 Assert((a_pVmxTransient)->fIsNestedGuest); \
5601 } while (0)
5602
5603# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5604 do { \
5605 Log4Func(("\n")); \
5606 } while (0)
5607#else
5608# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5609 do { \
5610 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5611 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5612 } while (0)
5613
5614# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5615 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5616
5617# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5618#endif
5619
5620#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5621/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
5622 * guests that attempted to execute a VMX instruction. */
5623# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5624 do \
5625 { \
5626 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5627 if (rcStrictTmp == VINF_SUCCESS) \
5628 { /* likely */ } \
5629 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5630 { \
5631 Assert((a_pVCpu)->hm.s.Event.fPending); \
5632 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5633 return VINF_SUCCESS; \
5634 } \
5635 else \
5636 { \
5637 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5638 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5639 } \
5640 } while (0)
5641
5642/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5643# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5644 do \
5645 { \
5646 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5647 (a_pGCPtrEffAddr)); \
5648 if (rcStrictTmp == VINF_SUCCESS) \
5649 { /* likely */ } \
5650 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5651 { \
5652 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5653 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5654 NOREF(uXcptTmp); \
5655 return VINF_SUCCESS; \
5656 } \
5657 else \
5658 { \
5659 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5660 return rcStrictTmp; \
5661 } \
5662 } while (0)
5663#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5664
5665
5666/**
5667 * Advances the guest RIP by the specified number of bytes.
5668 *
5669 * @param pVCpu The cross context virtual CPU structure.
5670 * @param cbInstr Number of bytes to advance the RIP by.
5671 *
5672 * @remarks No-long-jump zone!!!
5673 */
5674DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5675{
5676 /* Advance the RIP. */
5677 pVCpu->cpum.GstCtx.rip += cbInstr;
5678 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5679
5680 /* Update interrupt inhibition. */
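    /* The STI/MOV SS inhibition only applies while RIP still equals the PC recorded when the
       inhibition was established; once RIP has advanced past it, clear the force-flag. */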
5681 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5682 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5683 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5684}
5685
5686
5687/**
5688 * Advances the guest RIP after reading it from the VMCS.
5689 *
5690 * @returns VBox status code, no informational status codes.
5691 * @param pVCpu The cross context virtual CPU structure.
5692 * @param pVmxTransient The VMX-transient structure.
5693 *
5694 * @remarks No-long-jump zone!!!
5695 */
5696static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5697{
5698 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5699 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5700 AssertRCReturn(rc, rc);
5701
5702 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5703 return VINF_SUCCESS;
5704}
5705
5706
5707/**
5708 * Handles a condition that occurred while delivering an event through the guest or
5709 * nested-guest IDT.
5710 *
5711 * @returns Strict VBox status code (i.e. informational status codes too).
5712 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5713 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5714 *          to continue execution of the guest which will deliver the \#DF.
5715 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5716 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5717 *
5718 * @param pVCpu The cross context virtual CPU structure.
5719 * @param pVmxTransient The VMX-transient structure.
5720 *
5721 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5722 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5723 * is due to an EPT violation, PML full or SPP-related event.
5724 *
5725 * @remarks No-long-jump zone!!!
5726 */
5727static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5728{
5729 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5730 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5731 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5732 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5733 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5734 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5735
5736 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5737 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5738 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5739 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5740 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5741 {
5742 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5743 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5744
5745 /*
5746 * If the event was a software interrupt (generated with INT n) or a software exception
5747 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5748 * can handle the VM-exit and continue guest execution which will re-execute the
5749 * instruction rather than re-injecting the exception, as that can cause premature
5750 * trips to ring-3 before injection and involve TRPM which currently has no way of
5751 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5752 * the problem).
5753 */
5754 IEMXCPTRAISE enmRaise;
5755 IEMXCPTRAISEINFO fRaiseInfo;
5756 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5757 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5758 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5759 {
5760 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5761 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5762 }
5763 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5764 {
5765 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5766 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5767 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5768
5769 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5770 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5771
5772 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5773
5774 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5775 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5776 {
5777 pVmxTransient->fVectoringPF = true;
5778 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5779 }
5780 }
5781 else
5782 {
5783 /*
5784 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5785 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5786 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5787 */
5788 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5789 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5790 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5791 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5792 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5793 }
5794
5795 /*
5796 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5797 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5798 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5799 * subsequent VM-entry would fail, see @bugref{7445}.
5800 *
5801 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5802 */
5803 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5804 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5805 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5806 && CPUMIsGuestNmiBlocking(pVCpu))
5807 {
5808 CPUMSetGuestNmiBlocking(pVCpu, false);
5809 }
5810
5811 switch (enmRaise)
5812 {
5813 case IEMXCPTRAISE_CURRENT_XCPT:
5814 {
5815 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5816 Assert(rcStrict == VINF_SUCCESS);
5817 break;
5818 }
5819
5820 case IEMXCPTRAISE_PREV_EVENT:
5821 {
5822 uint32_t u32ErrCode;
5823 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5824 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5825 else
5826 u32ErrCode = 0;
5827
5828 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5829 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5830 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5831 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5832
5833 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5834 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5835 Assert(rcStrict == VINF_SUCCESS);
5836 break;
5837 }
5838
5839 case IEMXCPTRAISE_REEXEC_INSTR:
5840 Assert(rcStrict == VINF_SUCCESS);
5841 break;
5842
5843 case IEMXCPTRAISE_DOUBLE_FAULT:
5844 {
5845 /*
5846                 * Determine a vectoring double #PF condition. This is used later, when PGM evaluates the
5847                 * second #PF as a guest #PF (and not a shadow #PF) which then needs to be converted into a #DF.
5848 */
5849 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5850 {
5851 pVmxTransient->fVectoringDoublePF = true;
5852 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5853 pVCpu->cpum.GstCtx.cr2));
5854 rcStrict = VINF_SUCCESS;
5855 }
5856 else
5857 {
5858 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5859 vmxHCSetPendingXcptDF(pVCpu);
5860 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5861 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5862 rcStrict = VINF_HM_DOUBLE_FAULT;
5863 }
5864 break;
5865 }
5866
5867 case IEMXCPTRAISE_TRIPLE_FAULT:
5868 {
5869 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5870 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5871 rcStrict = VINF_EM_RESET;
5872 break;
5873 }
5874
5875 case IEMXCPTRAISE_CPU_HANG:
5876 {
5877 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5878 rcStrict = VERR_EM_GUEST_CPU_HANG;
5879 break;
5880 }
5881
5882 default:
5883 {
5884 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5885 rcStrict = VERR_VMX_IPE_2;
5886 break;
5887 }
5888 }
5889 }
5890 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5891 && !CPUMIsGuestNmiBlocking(pVCpu))
5892 {
5893 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5894 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5895 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5896 {
5897 /*
5898             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5899 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5900 * that virtual NMIs remain blocked until the IRET execution is completed.
5901 *
5902 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5903 */
5904 CPUMSetGuestNmiBlocking(pVCpu, true);
5905 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5906 }
5907 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5908 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5909 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5910 {
5911 /*
5912 * Execution of IRET caused an EPT violation, page-modification log-full event or
5913 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5914 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5915 * that virtual NMIs remain blocked until the IRET execution is completed.
5916 *
5917 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5918 */
5919 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5920 {
5921 CPUMSetGuestNmiBlocking(pVCpu, true);
5922 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5923 }
5924 }
5925 }
5926
5927 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5928 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5929 return rcStrict;
5930}
5931
5932
5933#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5934/**
5935 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
5936 * guest attempting to execute a VMX instruction.
5937 *
5938 * @returns Strict VBox status code (i.e. informational status codes too).
5939 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5940 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5941 *
5942 * @param pVCpu The cross context virtual CPU structure.
5943 * @param uExitReason The VM-exit reason.
5944 *
5945 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5946 * @remarks No-long-jump zone!!!
5947 */
5948static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5949{
5950 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5951 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5952
5953 /*
5954 * The physical CPU would have already checked the CPU mode/code segment.
5955 * We shall just assert here for paranoia.
5956 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5957 */
5958 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5959 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5960 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5961
5962 if (uExitReason == VMX_EXIT_VMXON)
5963 {
5964 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5965
5966 /*
5967 * We check CR4.VMXE because it is required to be always set while in VMX operation
5968 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5969 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5970 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5971 */
5972 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5973 {
5974 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5975 vmxHCSetPendingXcptUD(pVCpu);
5976 return VINF_HM_PENDING_XCPT;
5977 }
5978 }
5979 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5980 {
5981 /*
5982 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5983         * (other than VMXON), so we need to raise a #UD.
5984 */
5985 Log4Func(("Not in VMX root mode -> #UD\n"));
5986 vmxHCSetPendingXcptUD(pVCpu);
5987 return VINF_HM_PENDING_XCPT;
5988 }
5989
5990 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5991 return VINF_SUCCESS;
5992}
5993
5994
5995/**
5996 * Decodes the memory operand of an instruction that caused a VM-exit.
5997 *
5998 * The Exit qualification field provides the displacement field for memory
5999 * operand instructions, if any.
6000 *
6001 * @returns Strict VBox status code (i.e. informational status codes too).
6002 * @retval VINF_SUCCESS if the operand was successfully decoded.
6003 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6004 * operand.
6005 * @param pVCpu The cross context virtual CPU structure.
6006 * @param uExitInstrInfo The VM-exit instruction information field.
6007 * @param   GCPtrDisp       The instruction displacement field, if any. For
6008 *                          RIP-relative addressing pass RIP + displacement here.
6009 * @param   enmMemAccess    The memory operand's access type (read or write).
6010 * @param pGCPtrMem Where to store the effective destination memory address.
6011 *
6012 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6013 *          virtual-8086 mode and hence skips those checks while verifying if the
6014 * segment is valid.
6015 */
6016static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6017 PRTGCPTR pGCPtrMem)
6018{
6019 Assert(pGCPtrMem);
6020 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6021 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6022 | CPUMCTX_EXTRN_CR0);
6023
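    /* Address-size masks and access sizes indexed by the VM-exit instruction-information
       address-size field (0 = 16-bit, 1 = 32-bit, 2 = 64-bit). */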
6024 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6025 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6026 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6027
6028 VMXEXITINSTRINFO ExitInstrInfo;
6029 ExitInstrInfo.u = uExitInstrInfo;
6030 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6031 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6032 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6033 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6034 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6035 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6036 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6037 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6038 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6039
6040 /*
6041 * Validate instruction information.
6042     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6043 */
6044 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6045 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6046 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6047 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6048 AssertLogRelMsgReturn(fIsMemOperand,
6049 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6050
6051 /*
6052 * Compute the complete effective address.
6053 *
6054 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6055 * See AMD spec. 4.5.2 "Segment Registers".
6056 */
6057 RTGCPTR GCPtrMem = GCPtrDisp;
6058 if (fBaseRegValid)
6059 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6060 if (fIdxRegValid)
6061 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6062
6063 RTGCPTR const GCPtrOff = GCPtrMem;
6064 if ( !fIsLongMode
6065 || iSegReg >= X86_SREG_FS)
6066 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
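    /* Truncate the effective address to the instruction's address size. */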
6067 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6068
6069 /*
6070 * Validate effective address.
6071 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6072 */
6073 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6074 Assert(cbAccess > 0);
6075 if (fIsLongMode)
6076 {
6077 if (X86_IS_CANONICAL(GCPtrMem))
6078 {
6079 *pGCPtrMem = GCPtrMem;
6080 return VINF_SUCCESS;
6081 }
6082
6083 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6084 * "Data Limit Checks in 64-bit Mode". */
6085 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6086 vmxHCSetPendingXcptGP(pVCpu, 0);
6087 return VINF_HM_PENDING_XCPT;
6088 }
6089
6090 /*
6091 * This is a watered down version of iemMemApplySegment().
6092 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6093 * and segment CPL/DPL checks are skipped.
6094 */
6095 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6096 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6097 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6098
6099 /* Check if the segment is present and usable. */
6100 if ( pSel->Attr.n.u1Present
6101 && !pSel->Attr.n.u1Unusable)
6102 {
6103 Assert(pSel->Attr.n.u1DescType);
6104 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6105 {
6106 /* Check permissions for the data segment. */
6107 if ( enmMemAccess == VMXMEMACCESS_WRITE
6108 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6109 {
6110 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6111 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6112 return VINF_HM_PENDING_XCPT;
6113 }
6114
6115 /* Check limits if it's a normal data segment. */
6116 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6117 {
6118 if ( GCPtrFirst32 > pSel->u32Limit
6119 || GCPtrLast32 > pSel->u32Limit)
6120 {
6121 Log4Func(("Data segment limit exceeded. "
6122 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6123 GCPtrLast32, pSel->u32Limit));
6124 if (iSegReg == X86_SREG_SS)
6125 vmxHCSetPendingXcptSS(pVCpu, 0);
6126 else
6127 vmxHCSetPendingXcptGP(pVCpu, 0);
6128 return VINF_HM_PENDING_XCPT;
6129 }
6130 }
6131 else
6132 {
6133 /* Check limits if it's an expand-down data segment.
6134 Note! The upper boundary is defined by the B bit, not the G bit! */
6135 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6136 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6137 {
6138 Log4Func(("Expand-down data segment limit exceeded. "
6139 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6140 GCPtrLast32, pSel->u32Limit));
6141 if (iSegReg == X86_SREG_SS)
6142 vmxHCSetPendingXcptSS(pVCpu, 0);
6143 else
6144 vmxHCSetPendingXcptGP(pVCpu, 0);
6145 return VINF_HM_PENDING_XCPT;
6146 }
6147 }
6148 }
6149 else
6150 {
6151 /* Check permissions for the code segment. */
6152 if ( enmMemAccess == VMXMEMACCESS_WRITE
6153 || ( enmMemAccess == VMXMEMACCESS_READ
6154 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6155 {
6156 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6157 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6158 vmxHCSetPendingXcptGP(pVCpu, 0);
6159 return VINF_HM_PENDING_XCPT;
6160 }
6161
6162 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6163 if ( GCPtrFirst32 > pSel->u32Limit
6164 || GCPtrLast32 > pSel->u32Limit)
6165 {
6166 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6167 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6168 if (iSegReg == X86_SREG_SS)
6169 vmxHCSetPendingXcptSS(pVCpu, 0);
6170 else
6171 vmxHCSetPendingXcptGP(pVCpu, 0);
6172 return VINF_HM_PENDING_XCPT;
6173 }
6174 }
6175 }
6176 else
6177 {
6178 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6179 vmxHCSetPendingXcptGP(pVCpu, 0);
6180 return VINF_HM_PENDING_XCPT;
6181 }
6182
6183 *pGCPtrMem = GCPtrMem;
6184 return VINF_SUCCESS;
6185}
6186#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6187
6188
6189/**
6190 * VM-exit helper for LMSW.
6191 */
6192static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6193{
6194 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6195 AssertRCReturn(rc, rc);
6196
6197 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6198 AssertMsg( rcStrict == VINF_SUCCESS
6199 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6200
6201 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6202 if (rcStrict == VINF_IEM_RAISED_XCPT)
6203 {
6204 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6205 rcStrict = VINF_SUCCESS;
6206 }
6207
6208 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6209 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6210 return rcStrict;
6211}
6212
6213
6214/**
6215 * VM-exit helper for CLTS.
6216 */
6217static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6218{
6219 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6220 AssertRCReturn(rc, rc);
6221
6222 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6223 AssertMsg( rcStrict == VINF_SUCCESS
6224 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6225
6226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6227 if (rcStrict == VINF_IEM_RAISED_XCPT)
6228 {
6229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6230 rcStrict = VINF_SUCCESS;
6231 }
6232
6233 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6234 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6235 return rcStrict;
6236}
6237
6238
6239/**
6240 * VM-exit helper for MOV from CRx (CRx read).
6241 */
6242static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6243{
6244 Assert(iCrReg < 16);
6245 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6246
6247 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6248 AssertRCReturn(rc, rc);
6249
6250 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6251 AssertMsg( rcStrict == VINF_SUCCESS
6252 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6253
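    /* If the destination GPR is RSP we must also mark RSP as changed, since it is swapped via the VMCS. */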
6254 if (iGReg == X86_GREG_xSP)
6255 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6256 else
6257 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6258#ifdef VBOX_WITH_STATISTICS
6259 switch (iCrReg)
6260 {
6261 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6262 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6263 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6264 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6265 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6266 }
6267#endif
6268 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6269 return rcStrict;
6270}
6271
6272
6273/**
6274 * VM-exit helper for MOV to CRx (CRx write).
6275 */
6276static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6277{
6278 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6279
6280 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6281 AssertMsg( rcStrict == VINF_SUCCESS
6282 || rcStrict == VINF_IEM_RAISED_XCPT
6283 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6284
6285 switch (iCrReg)
6286 {
6287 case 0:
6288 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6289 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6290 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6291 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6292 break;
6293
6294 case 2:
6295 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6296            /* Nothing to do here, CR2 is not part of the VMCS. */
6297 break;
6298
6299 case 3:
6300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6301 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6302 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6303 break;
6304
6305 case 4:
6306 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6307 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6308#ifndef IN_NEM_DARWIN
6309 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6310 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6311#else
6312 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6313#endif
6314 break;
6315
6316 case 8:
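            /* MOV to CR8 updates the (virtual) APIC TPR, so flag the TPR for re-export as well. */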
6317 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6318 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6319 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6320 break;
6321
6322 default:
6323 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6324 break;
6325 }
6326
6327 if (rcStrict == VINF_IEM_RAISED_XCPT)
6328 {
6329 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6330 rcStrict = VINF_SUCCESS;
6331 }
6332 return rcStrict;
6333}
6334
6335
6336/**
6337 * VM-exit exception handler for \#PF (Page-fault exception).
6338 *
6339 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6340 */
6341static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6342{
6343 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6344 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6345
6346#ifndef IN_NEM_DARWIN
6347 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6348 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6349 { /* likely */ }
6350 else
6351#endif
6352 {
6353#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6354 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6355#endif
6356 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6357 if (!pVmxTransient->fVectoringDoublePF)
6358 {
6359 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6360 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6361 }
6362 else
6363 {
6364 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6365 Assert(!pVmxTransient->fIsNestedGuest);
6366 vmxHCSetPendingXcptDF(pVCpu);
6367 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6368 }
6369 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6370 return VINF_SUCCESS;
6371 }
6372
6373 Assert(!pVmxTransient->fIsNestedGuest);
6374
6375    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6376 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6377 if (pVmxTransient->fVectoringPF)
6378 {
6379 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6380 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6381 }
6382
6383 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6384 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6385 AssertRCReturn(rc, rc);
6386
6387 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6388 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6389
6390 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6391 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6392
6393 Log4Func(("#PF: rc=%Rrc\n", rc));
6394 if (rc == VINF_SUCCESS)
6395 {
6396 /*
6397 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6398 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6399 */
6400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6401 TRPMResetTrap(pVCpu);
6402 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6403 return rc;
6404 }
6405
6406 if (rc == VINF_EM_RAW_GUEST_TRAP)
6407 {
6408 if (!pVmxTransient->fVectoringDoublePF)
6409 {
6410 /* It's a guest page fault and needs to be reflected to the guest. */
6411 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6412 TRPMResetTrap(pVCpu);
6413 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6414 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6415 uGstErrorCode, pVmxTransient->uExitQual);
6416 }
6417 else
6418 {
6419 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6420 TRPMResetTrap(pVCpu);
6421 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6422 vmxHCSetPendingXcptDF(pVCpu);
6423 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6424 }
6425
6426 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6427 return VINF_SUCCESS;
6428 }
6429
6430 TRPMResetTrap(pVCpu);
6431 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6432 return rc;
6433}
6434
6435
6436/**
6437 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6438 *
6439 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6440 */
6441static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6442{
6443 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6444 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6445
6446 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6447 AssertRCReturn(rc, rc);
6448
6449 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6450 {
6451 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6452 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6453
6454 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6455         *        provides VM-exit instruction length. If this causes problems later,
6456 * disassemble the instruction like it's done on AMD-V. */
6457 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6458 AssertRCReturn(rc2, rc2);
6459 return rc;
6460 }
6461
6462 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6463 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6464 return VINF_SUCCESS;
6465}
6466
6467
6468/**
6469 * VM-exit exception handler for \#BP (Breakpoint exception).
6470 *
6471 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6472 */
6473static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6474{
6475 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6476 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6477
6478 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6479 AssertRCReturn(rc, rc);
6480
6481 VBOXSTRICTRC rcStrict;
6482 if (!pVmxTransient->fIsNestedGuest)
6483 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6484 else
6485 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6486
6487 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6488 {
6489 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6490 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6491 rcStrict = VINF_SUCCESS;
6492 }
6493
6494 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6495 return rcStrict;
6496}
6497
6498
6499/**
6500 * VM-exit exception handler for \#AC (Alignment-check exception).
6501 *
6502 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6503 */
6504static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6505{
6506 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6507
6508 /*
6509     * Detect #ACs caused by the host having enabled split-lock detection.
6510     * Emulate such instructions.
6511 */
6512 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6513 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6514 AssertRCReturn(rc, rc);
6515 /** @todo detect split lock in cpu feature? */
6516 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6517 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6518 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6519 || CPUMGetGuestCPL(pVCpu) != 3
6520 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6521 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6522 {
6523 /*
6524 * Check for debug/trace events and import state accordingly.
6525 */
6526 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6527 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6528 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6529#ifndef IN_NEM_DARWIN
6530 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6531#endif
6532 )
6533 {
6534 if (pVM->cCpus == 1)
6535 {
6536#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6537 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6538#else
6539 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6540#endif
6541 AssertRCReturn(rc, rc);
6542 }
6543 }
6544 else
6545 {
6546 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6547 AssertRCReturn(rc, rc);
6548
6549 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6550
6551 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6552 {
6553 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6554 if (rcStrict != VINF_SUCCESS)
6555 return rcStrict;
6556 }
6557 }
6558
6559 /*
6560 * Emulate the instruction.
6561 *
6562 * We have to ignore the LOCK prefix here as we must not retrigger the
6563 * detection on the host. This isn't all that satisfactory, though...
6564 */
6565 if (pVM->cCpus == 1)
6566 {
6567 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6568 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6569
6570 /** @todo For SMP configs we should do a rendezvous here. */
6571 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6572 if (rcStrict == VINF_SUCCESS)
6573#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6574 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6575 HM_CHANGED_GUEST_RIP
6576 | HM_CHANGED_GUEST_RFLAGS
6577 | HM_CHANGED_GUEST_GPRS_MASK
6578 | HM_CHANGED_GUEST_CS
6579 | HM_CHANGED_GUEST_SS);
6580#else
6581 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6582#endif
6583 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6584 {
6585 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6586 rcStrict = VINF_SUCCESS;
6587 }
6588 return rcStrict;
6589 }
6590 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6591 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6592 return VINF_EM_EMULATE_SPLIT_LOCK;
6593 }
6594
6595 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6596 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6597 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6598
6599 /* Re-inject it. We'll detect any nesting before getting here. */
6600 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6601 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6602 return VINF_SUCCESS;
6603}
6604
6605
6606/**
6607 * VM-exit exception handler for \#DB (Debug exception).
6608 *
6609 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6610 */
6611static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6612{
6613 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6614 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6615
6616 /*
6617     * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6618 */
6619 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6620
6621 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6622 uint64_t const uDR6 = X86_DR6_INIT_VAL
6623 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6624 | X86_DR6_BD | X86_DR6_BS));
6625
6626 int rc;
6627 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6628 if (!pVmxTransient->fIsNestedGuest)
6629 {
6630 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6631
6632 /*
6633 * Prevents stepping twice over the same instruction when the guest is stepping using
6634 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6635 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6636 */
6637 if ( rc == VINF_EM_DBG_STEPPED
6638 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6639 {
6640 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6641 rc = VINF_EM_RAW_GUEST_TRAP;
6642 }
6643 }
6644 else
6645 rc = VINF_EM_RAW_GUEST_TRAP;
6646 Log6Func(("rc=%Rrc\n", rc));
6647 if (rc == VINF_EM_RAW_GUEST_TRAP)
6648 {
6649 /*
6650 * The exception was for the guest. Update DR6, DR7.GD and
6651 * IA32_DEBUGCTL.LBR before forwarding it.
6652 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6653 */
6654#ifndef IN_NEM_DARWIN
6655 VMMRZCallRing3Disable(pVCpu);
6656 HM_DISABLE_PREEMPT(pVCpu);
6657
6658 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6659 pCtx->dr[6] |= uDR6;
6660 if (CPUMIsGuestDebugStateActive(pVCpu))
6661 ASMSetDR6(pCtx->dr[6]);
6662
6663 HM_RESTORE_PREEMPT();
6664 VMMRZCallRing3Enable(pVCpu);
6665#else
6666 /** @todo */
6667#endif
6668
6669 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6670 AssertRCReturn(rc, rc);
6671
6672 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6673 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6674
6675 /* Paranoia. */
6676 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6677 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6678
6679 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6680 AssertRC(rc);
6681
6682 /*
6683 * Raise #DB in the guest.
6684 *
6685 * It is important to reflect exactly what the VM-exit gave us (preserving the
6686 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6687 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6688 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6689 *
6690         * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of the
6691         * Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6692 */
6693 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6694 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6695 return VINF_SUCCESS;
6696 }
6697
6698 /*
6699     * Not a guest trap; must be a hypervisor-related debug event then.
6700 * Update DR6 in case someone is interested in it.
6701 */
6702 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6703 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6704 CPUMSetHyperDR6(pVCpu, uDR6);
6705
6706 return rc;
6707}
6708
6709
6710/**
6711 * Hacks its way around the lovely mesa driver's backdoor accesses.
6712 *
6713 * @sa hmR0SvmHandleMesaDrvGp.
6714 */
6715static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6716{
6717 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6718 RT_NOREF(pCtx);
6719
6720 /* For now we'll just skip the instruction. */
6721 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6722}
6723
6724
6725/**
6726 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6727 * backdoor logging w/o checking what it is running inside.
6728 *
6729 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6730 * backdoor port and magic numbers loaded in registers.
6731 *
6732 * @returns true if it is, false if it isn't.
6733 * @sa hmR0SvmIsMesaDrvGp.
6734 */
6735DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6736{
6737 /* 0xed: IN eAX,dx */
6738 uint8_t abInstr[1];
6739 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6740 return false;
6741
6742 /* Check that it is #GP(0). */
6743 if (pVmxTransient->uExitIntErrorCode != 0)
6744 return false;
6745
6746 /* Check magic and port. */
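    /* EAX = 0x564d5868 ('VMXh') and DX = 0x5658 ('VX') are the VMware backdoor magic and I/O port the mesa driver uses. */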
6747 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6748 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6749 if (pCtx->rax != UINT32_C(0x564d5868))
6750 return false;
6751 if (pCtx->dx != UINT32_C(0x5658))
6752 return false;
6753
6754 /* Flat ring-3 CS. */
6755 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6756 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6757 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6758 if (pCtx->cs.Attr.n.u2Dpl != 3)
6759 return false;
6760 if (pCtx->cs.u64Base != 0)
6761 return false;
6762
6763 /* Check opcode. */
6764 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6765 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6766 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6767 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6768 if (RT_FAILURE(rc))
6769 return false;
6770 if (abInstr[0] != 0xed)
6771 return false;
6772
6773 return true;
6774}
6775
6776
6777/**
6778 * VM-exit exception handler for \#GP (General-protection exception).
6779 *
6780 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6781 */
6782static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6783{
6784 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6785 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6786
6787 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6788 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6789#ifndef IN_NEM_DARWIN
6790 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6791 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6792 { /* likely */ }
6793 else
6794#endif
6795 {
6796#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6797# ifndef IN_NEM_DARWIN
6798 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6799# else
6800 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6801# endif
6802#endif
6803 /*
6804 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6805 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6806 */
6807 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6808 AssertRCReturn(rc, rc);
6809 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6810 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6811
6812 if ( pVmxTransient->fIsNestedGuest
6813 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6814 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6815 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6816 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6817 else
6818 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6819 return rc;
6820 }
6821
6822#ifndef IN_NEM_DARWIN
6823 Assert(CPUMIsGuestInRealModeEx(pCtx));
6824 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6825 Assert(!pVmxTransient->fIsNestedGuest);
6826
6827 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6828 AssertRCReturn(rc, rc);
6829
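    /* Real-on-v86 mode without unrestricted guest execution: emulate the faulting instruction with IEM. */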
6830 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6831 if (rcStrict == VINF_SUCCESS)
6832 {
6833 if (!CPUMIsGuestInRealModeEx(pCtx))
6834 {
6835 /*
6836 * The guest is no longer in real-mode, check if we can continue executing the
6837 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6838 */
6839 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6840 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6841 {
6842 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6843 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6844 }
6845 else
6846 {
6847 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6848 rcStrict = VINF_EM_RESCHEDULE;
6849 }
6850 }
6851 else
6852 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6853 }
6854 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6855 {
6856 rcStrict = VINF_SUCCESS;
6857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6858 }
6859 return VBOXSTRICTRC_VAL(rcStrict);
6860#endif
6861}
6862
6863
6864/**
6865 * VM-exit exception handler for \#DE (Divide Error).
6866 *
6867 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6868 */
6869static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6870{
6871 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6872 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6873
6874 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6875 AssertRCReturn(rc, rc);
6876
6877 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
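    /* Give GCM a chance to fix up the #DE first; on success the instruction is simply restarted with the modified guest context. */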
6878 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6879 {
6880 uint8_t cbInstr = 0;
6881 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6882 if (rc2 == VINF_SUCCESS)
6883 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6884 else if (rc2 == VERR_NOT_FOUND)
6885 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6886 else
6887 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6888 }
6889 else
6890 rcStrict = VINF_SUCCESS; /* Do nothing. */
6891
6892 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6893 if (RT_FAILURE(rcStrict))
6894 {
6895 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6896 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6897 rcStrict = VINF_SUCCESS;
6898 }
6899
6900 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6901 return VBOXSTRICTRC_VAL(rcStrict);
6902}
6903
6904
6905/**
6906 * VM-exit exception handler wrapper for all other exceptions that are not handled
6907 * by a specific handler.
6908 *
6909 * This simply re-injects the exception back into the VM without any special
6910 * processing.
6911 *
6912 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6913 */
6914static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6915{
6916 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6917
6918#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6919# ifndef IN_NEM_DARWIN
6920 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6921 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6922 ("uVector=%#x u32XcptBitmap=%#X32\n",
6923 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6924 NOREF(pVmcsInfo);
6925# endif
6926#endif
6927
6928 /*
6929     * Re-inject the exception into the guest. This cannot be a double-fault condition, which
6930 * would have been handled while checking exits due to event delivery.
6931 */
6932 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6933
6934#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6935 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6936 AssertRCReturn(rc, rc);
6937 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6938#endif
6939
6940#ifdef VBOX_WITH_STATISTICS
6941 switch (uVector)
6942 {
6943 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6944 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6945 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6946 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6947 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6948 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6949 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6950 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6951 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6952 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6953 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6954 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6955 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6956 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6957 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6958 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6959 default:
6960 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6961 break;
6962 }
6963#endif
6964
6965 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6966 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6967 NOREF(uVector);
6968
6969 /* Re-inject the original exception into the guest. */
6970 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6971 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6972 return VINF_SUCCESS;
6973}
6974
6975
6976/**
6977 * VM-exit exception handler for all exceptions (except NMIs!).
6978 *
6979 * @remarks This may be called for both guests and nested-guests. Take care to not
6980 * make assumptions and avoid doing anything that is not relevant when
6981 * executing a nested-guest (e.g., Mesa driver hacks).
6982 */
6983static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6984{
6985 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6986
6987 /*
6988 * If this VM-exit occurred while delivering an event through the guest IDT, take
6989 * action based on the return code and additional hints (e.g. for page-faults)
6990 * that will be updated in the VMX transient structure.
6991 */
6992 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6993 if (rcStrict == VINF_SUCCESS)
6994 {
6995 /*
6996 * If an exception caused a VM-exit due to delivery of an event, the original
6997 * event may have to be re-injected into the guest. We shall reinject it and
6998 * continue guest execution. However, page-fault is a complicated case and
6999 * needs additional processing done in vmxHCExitXcptPF().
7000 */
7001 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7002 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7003 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7004 || uVector == X86_XCPT_PF)
7005 {
7006 switch (uVector)
7007 {
7008 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7009 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7010 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7011 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7012 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7013 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7014 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7015 default:
7016 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7017 }
7018 }
7019 /* else: inject pending event before resuming guest execution. */
7020 }
7021 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7022 {
7023 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7024 rcStrict = VINF_SUCCESS;
7025 }
7026
7027 return rcStrict;
7028}
7029/** @} */
7030
7031
7032/** @name VM-exit handlers.
7033 * @{
7034 */
7035/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7036/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7037/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7038
7039/**
7040 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7041 */
7042HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7043{
7044 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7045 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7046
7047#ifndef IN_NEM_DARWIN
7048 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7049 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7050 return VINF_SUCCESS;
7051 return VINF_EM_RAW_INTERRUPT;
7052#else
7053 return VINF_SUCCESS;
7054#endif
7055}
7056
7057
7058/**
7059 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7060 * VM-exit.
7061 */
7062HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7063{
7064 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7065 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7066
7067 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7068
7069 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7070 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7071 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7072
7073 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7074 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7075 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7076 NOREF(pVmcsInfo);
7077
7078 VBOXSTRICTRC rcStrict;
7079 switch (uExitIntType)
7080 {
7081#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7082 /*
7083 * Host physical NMIs:
7084 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7085     * injected it ourselves, and anything we inject is not going to cause a VM-exit directly
7086 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7087 *
7088 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7089 * See Intel spec. 27.5.5 "Updating Non-Register State".
7090 */
7091 case VMX_EXIT_INT_INFO_TYPE_NMI:
7092 {
7093 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7094 break;
7095 }
7096#endif
7097
7098 /*
7099 * Privileged software exceptions (#DB from ICEBP),
7100 * Software exceptions (#BP and #OF),
7101 * Hardware exceptions:
7102 * Process the required exceptions and resume guest execution if possible.
7103 */
7104 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7105 Assert(uVector == X86_XCPT_DB);
7106 RT_FALL_THRU();
7107 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7108 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7109 RT_FALL_THRU();
7110 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7111 {
7112 NOREF(uVector);
7113 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7114 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7115 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7116 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7117
7118 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7119 break;
7120 }
7121
7122 default:
7123 {
7124 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7125 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7126 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7127 break;
7128 }
7129 }
7130
7131 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7132 return rcStrict;
7133}
7134
7135
7136/**
7137 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7138 */
7139HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7140{
7141 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7142
7143 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7144 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7145 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7146
7147 /* Evaluate and deliver pending events and resume guest execution. */
7148 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7149 return VINF_SUCCESS;
7150}
7151
7152
7153/**
7154 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7155 */
7156HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7157{
7158 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7159
7160 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7161 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7162 {
7163 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7164 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7165 }
7166
7167 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7168
7169 /*
7170 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7171 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7172 */
7173 uint32_t fIntrState;
7174 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7175 AssertRC(rc);
7176 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7177 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7178 {
7179 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7180 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7181
7182 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7183 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7184 AssertRC(rc);
7185 }
7186
7187 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7188 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7189
7190 /* Evaluate and deliver pending events and resume guest execution. */
7191 return VINF_SUCCESS;
7192}
7193
7194
7195/**
7196 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7197 */
7198HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7199{
7200 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
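    /* Nothing to do here beyond skipping the instruction. */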
7201 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7202}
7203
7204
7205/**
7206 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7207 */
7208HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7209{
7210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7211 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7212}
7213
7214
7215/**
7216 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7217 */
7218HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7219{
7220 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7221
7222 /*
7223 * Get the state we need and update the exit history entry.
7224 */
7225 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7226 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7227
7228 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7229 AssertRCReturn(rc, rc);
7230
7231 VBOXSTRICTRC rcStrict;
7232 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7233 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7234 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7235 if (!pExitRec)
7236 {
7237 /*
7238 * Regular CPUID instruction execution.
7239 */
7240 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7241 if (rcStrict == VINF_SUCCESS)
7242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7243 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7244 {
7245 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7246 rcStrict = VINF_SUCCESS;
7247 }
7248 }
7249 else
7250 {
7251 /*
7252 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7253 */
7254 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7255 AssertRCReturn(rc2, rc2);
7256
7257 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7258 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7259
7260 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7261 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7262
7263 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7264 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7265 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7266 }
7267 return rcStrict;
7268}
7269
7270
7271/**
7272 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7273 */
7274HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7275{
7276 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7277
7278 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7279 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7280 AssertRCReturn(rc, rc);
7281
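    /* GETSEC raises #UD when CR4.SMXE is clear, so we only expect this VM-exit with SMXE set; in that case defer emulating the instruction to ring-3. */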
7282 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7283 return VINF_EM_RAW_EMULATE_INSTR;
7284
7285 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7286 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7287}
7288
7289
7290/**
7291 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7292 */
7293HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7294{
7295 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7296
7297 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7298 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7299 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7300 AssertRCReturn(rc, rc);
7301
7302 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7303 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7304 {
7305 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7306 we must reset offsetting on VM-entry. See @bugref{6634}. */
7307 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7308 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7309 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7310 }
7311 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7312 {
7313 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7314 rcStrict = VINF_SUCCESS;
7315 }
7316 return rcStrict;
7317}
7318
7319
7320/**
7321 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7322 */
7323HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7324{
7325 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7326
7327 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7328 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7329 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7330 AssertRCReturn(rc, rc);
7331
7332 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7333 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7334 {
7335 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7336 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7337 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7338 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7339 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7340 }
7341 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7342 {
7343 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7344 rcStrict = VINF_SUCCESS;
7345 }
7346 return rcStrict;
7347}
7348
7349
7350/**
7351 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7352 */
7353HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7354{
7355 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7356
7357 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7358 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7359 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7360 AssertRCReturn(rc, rc);
7361
7362 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7363 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7364 if (RT_LIKELY(rc == VINF_SUCCESS))
7365 {
7366 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7367 Assert(pVmxTransient->cbExitInstr == 2);
7368 }
7369 else
7370 {
7371 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7372 rc = VERR_EM_INTERPRETER;
7373 }
7374 return rc;
7375}
7376
7377
7378/**
7379 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7380 */
7381HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7382{
7383 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7384
7385 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7386 if (EMAreHypercallInstructionsEnabled(pVCpu))
7387 {
7388 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7389 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7390 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7391 AssertRCReturn(rc, rc);
7392
7393 /* Perform the hypercall. */
7394 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7395 if (rcStrict == VINF_SUCCESS)
7396 {
7397 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7398 AssertRCReturn(rc, rc);
7399 }
7400 else
7401 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7402 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7403 || RT_FAILURE(rcStrict));
7404
7405 /* If the hypercall changes anything other than the guest's general-purpose registers,
7406 we would need to reload the changed guest bits here before VM-entry. */
7407 }
7408 else
7409 Log4Func(("Hypercalls not enabled\n"));
7410
7411 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7412 if (RT_FAILURE(rcStrict))
7413 {
7414 vmxHCSetPendingXcptUD(pVCpu);
7415 rcStrict = VINF_SUCCESS;
7416 }
7417
7418 return rcStrict;
7419}
7420
7421
7422/**
7423 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7424 */
7425HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7426{
7427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7428#ifndef IN_NEM_DARWIN
7429 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7430#endif
7431
7432 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7433 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7434 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7435 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7436 AssertRCReturn(rc, rc);
7437
7438 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7439
7440 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7441 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7442 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7443 {
7444 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7445 rcStrict = VINF_SUCCESS;
7446 }
7447 else
7448 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7449 VBOXSTRICTRC_VAL(rcStrict)));
7450 return rcStrict;
7451}
7452
7453
7454/**
7455 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7456 */
7457HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7458{
7459 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7460
7461 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7462 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7463 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7464 AssertRCReturn(rc, rc);
7465
7466 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7467 if (rcStrict == VINF_SUCCESS)
7468 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7469 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7470 {
7471 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7472 rcStrict = VINF_SUCCESS;
7473 }
7474
7475 return rcStrict;
7476}
7477
7478
7479/**
7480 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7481 */
7482HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7483{
7484 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7485
7486 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7487 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7488 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7489 AssertRCReturn(rc, rc);
7490
7491 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7492 if (RT_SUCCESS(rcStrict))
7493 {
7494 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
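        /* IEM may have returned VINF_EM_HALT; let EM decide whether the guest really needs to halt or whether execution can simply continue. */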
7495 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7496 rcStrict = VINF_SUCCESS;
7497 }
7498
7499 return rcStrict;
7500}
7501
7502
7503/**
7504 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7505 * VM-exit.
7506 */
7507HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7508{
7509 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7510 return VINF_EM_RESET;
7511}
7512
7513
7514/**
7515 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7516 */
7517HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7518{
7519 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7520
7521 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7522 AssertRCReturn(rc, rc);
7523
7524 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7525 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7526 rc = VINF_SUCCESS;
7527 else
7528 rc = VINF_EM_HALT;
7529
7530 if (rc != VINF_SUCCESS)
7531 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7532 return rc;
7533}
7534
7535
7536#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7537/**
7538 * VM-exit handler for instructions that result in a \#UD exception delivered to
7539 * the guest.
7540 */
7541HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7542{
7543 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7544 vmxHCSetPendingXcptUD(pVCpu);
7545 return VINF_SUCCESS;
7546}
7547#endif
7548
7549
7550/**
7551 * VM-exit handler for expiry of the VMX-preemption timer.
7552 */
7553HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7554{
7555 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7556
7557 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7558 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7559 Log12(("vmxHCExitPreemptTimer:\n"));
7560
7561 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7562 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7563 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7564 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7565 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7566}
7567
7568
7569/**
7570 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7571 */
7572HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7573{
7574 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7575
7576 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7577 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7578 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7579 AssertRCReturn(rc, rc);
7580
7581 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
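    /* Mark RIP/RFLAGS as dirty on the normal path; if IEM raised an exception, flag the exception-related state instead. */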
7582 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7583 : HM_CHANGED_RAISED_XCPT_MASK);
7584
7585#ifndef IN_NEM_DARWIN
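    /* XSETBV may have changed the guest XCR0; re-check whether we need to swap XCR0 around VM-entry/exit and update the start-VM function selection if so. */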
7586 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7587 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7588 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7589 {
7590 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7591 hmR0VmxUpdateStartVmFunction(pVCpu);
7592 }
7593#endif
7594
7595 return rcStrict;
7596}
7597
7598
7599/**
7600 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7601 */
7602HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7603{
7604 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7605
7606 /** @todo Enable the new code after finding a reliable guest test-case. */
7607#if 1
7608 return VERR_EM_INTERPRETER;
7609#else
7610 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7611 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7612 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7613 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7614 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7615 AssertRCReturn(rc, rc);
7616
7617 /* Paranoia. Ensure this has a memory operand. */
7618 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7619
7620 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7621 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7622 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7623 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7624
7625 RTGCPTR GCPtrDesc;
7626 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7627
7628 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7629 GCPtrDesc, uType);
7630 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7631 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7632 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7633 {
7634 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7635 rcStrict = VINF_SUCCESS;
7636 }
7637 return rcStrict;
7638#endif
7639}
7640
7641
7642/**
7643 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7644 * VM-exit.
7645 */
7646HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7647{
7648 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7649 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7650 AssertRCReturn(rc, rc);
7651
7652 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7653 if (RT_FAILURE(rc))
7654 return rc;
7655
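    /* Run our own guest-state checks to determine which field is invalid; the result is currently only used for the strict-build logging below. */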
7656 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7657 NOREF(uInvalidReason);
7658
7659#ifdef VBOX_STRICT
7660 uint32_t fIntrState;
7661 uint64_t u64Val;
7662 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7663 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7664 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7665
7666 Log4(("uInvalidReason %u\n", uInvalidReason));
7667 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7668 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7669 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7670
7671 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7672 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7673 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7674 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7675 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7676 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7677 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7678 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7679 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7680 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7681 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7682 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7683# ifndef IN_NEM_DARWIN
7684 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7685 {
7686 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7687 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7688 }
7689
7690 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7691# endif
7692#endif
7693
7694 return VERR_VMX_INVALID_GUEST_STATE;
7695}
7696
7697/**
7698 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7699 */
7700HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7701{
7702 /*
7703 * Cumulative notes of all recognized but unexpected VM-exits.
7704 *
7705 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7706 * nested-paging is used.
7707 *
7708 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
7709 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7710 * this function (and thereby stop VM execution) for handling such instructions.
7711 *
7712 *
7713 * VMX_EXIT_INIT_SIGNAL:
7714 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7715 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7716 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7717 *
7718 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7719 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7720 * See Intel spec. "23.8 Restrictions on VMX operation".
7721 *
7722 * VMX_EXIT_SIPI:
7723 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7724 * activity state is used. We don't make use of it as our guests don't have direct
7725 * access to the host local APIC.
7726 *
7727 * See Intel spec. 25.3 "Other Causes of VM-exits".
7728 *
7729 * VMX_EXIT_IO_SMI:
7730 * VMX_EXIT_SMI:
7731 * This can only happen if we support dual-monitor treatment of SMI, which can be
7732 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7733 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7734 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7735 *
7736 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7737 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7738 *
7739 * VMX_EXIT_ERR_MSR_LOAD:
7740 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7741 * and typically indicate a bug in the hypervisor code. We thus cannot resume
7742 * execution.
7743 *
7744 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7745 *
7746 * VMX_EXIT_ERR_MACHINE_CHECK:
7747 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7748 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7749 * abort-class #MC exception is raised. We thus cannot assume a reasonable chance
7750 * of continuing any sort of execution and we bail.
7751 *
7752 * See Intel spec. 15.1 "Machine-check Architecture".
7753 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7754 *
7755 * VMX_EXIT_PML_FULL:
7756 * VMX_EXIT_VIRTUALIZED_EOI:
7757 * VMX_EXIT_APIC_WRITE:
7758 * We do not currently support any of these features and thus they are all unexpected
7759 * VM-exits.
7760 *
7761 * VMX_EXIT_GDTR_IDTR_ACCESS:
7762 * VMX_EXIT_LDTR_TR_ACCESS:
7763 * VMX_EXIT_RDRAND:
7764 * VMX_EXIT_RSM:
7765 * VMX_EXIT_VMFUNC:
7766 * VMX_EXIT_ENCLS:
7767 * VMX_EXIT_RDSEED:
7768 * VMX_EXIT_XSAVES:
7769 * VMX_EXIT_XRSTORS:
7770 * VMX_EXIT_UMWAIT:
7771 * VMX_EXIT_TPAUSE:
7772 * VMX_EXIT_LOADIWKEY:
7773 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7774 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7775 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7776 *
7777 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7778 */
7779 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7780 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7781 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7782}
7783
7784
7785/**
7786 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7787 */
7788HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7789{
7790 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7791
7792 /** @todo Optimize this: We currently drag in the whole MSR state
7793 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7794 * MSRs required. That would require changes to IEM and possibly CPUM too.
7795 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7796 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7797 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7798 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
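    /* The FS and GS base MSRs are not part of CPUMCTX_EXTRN_ALL_MSRS, so import the full segment registers when those are being read. */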
7799 switch (idMsr)
7800 {
7801 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7802 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7803 }
7804
7805 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7806 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7807 AssertRCReturn(rc, rc);
7808
7809 Log4Func(("ecx=%#RX32\n", idMsr));
7810
7811#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7812 Assert(!pVmxTransient->fIsNestedGuest);
7813 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7814 {
7815 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7816 && idMsr != MSR_K6_EFER)
7817 {
7818 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7819 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7820 }
7821 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7822 {
7823 Assert(pVmcsInfo->pvMsrBitmap);
7824 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7825 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7826 {
7827 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7828 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7829 }
7830 }
7831 }
7832#endif
7833
7834 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7835 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7836 if (rcStrict == VINF_SUCCESS)
7837 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7838 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7839 {
7840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7841 rcStrict = VINF_SUCCESS;
7842 }
7843 else
7844 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7845 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7846
7847 return rcStrict;
7848}
7849
7850
7851/**
7852 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7853 */
7854HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7855{
7856 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7857
7858 /** @todo Optimize this: We currently drag in the whole MSR state
7859 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
7860 * MSRs required. That would require changes to IEM and possibly CPUM too.
7861 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7862 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7863 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7864
7865 /*
7866 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7867 * Although we don't need to fetch the base itself as it will be overwritten shortly, when
7868 * loading the guest state we would also load the entire segment register, including the
7869 * limit and attributes, and thus we need to load them here.
7870 */
7871 switch (idMsr)
7872 {
7873 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7874 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7875 }
7876
7877 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7878 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7879 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7880 AssertRCReturn(rc, rc);
7881
7882 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7883
7884 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7885 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7886
7887 if (rcStrict == VINF_SUCCESS)
7888 {
7889 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7890
7891 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7892 if ( idMsr == MSR_IA32_APICBASE
7893 || ( idMsr >= MSR_IA32_X2APIC_START
7894 && idMsr <= MSR_IA32_X2APIC_END))
7895 {
7896 /*
7897 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7898 * When full APIC register virtualization is implemented we'll have to make
7899 * sure APIC state is saved from the VMCS before IEM changes it.
7900 */
7901 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7902 }
7903 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7904 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7905 else if (idMsr == MSR_K6_EFER)
7906 {
7907 /*
7908 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7909 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7910 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7911 */
7912 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7913 }
7914
7915 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7916 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7917 {
7918 switch (idMsr)
7919 {
7920 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7921 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7922 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7923 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7924 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7925 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7926 default:
7927 {
7928#ifndef IN_NEM_DARWIN
7929 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7930 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7931 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7932 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7933#else
7934 AssertMsgFailed(("TODO\n"));
7935#endif
7936 break;
7937 }
7938 }
7939 }
7940#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7941 else
7942 {
7943 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7944 switch (idMsr)
7945 {
7946 case MSR_IA32_SYSENTER_CS:
7947 case MSR_IA32_SYSENTER_EIP:
7948 case MSR_IA32_SYSENTER_ESP:
7949 case MSR_K8_FS_BASE:
7950 case MSR_K8_GS_BASE:
7951 {
7952 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7953 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7954 }
7955
7956 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7957 default:
7958 {
7959 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7960 {
7961 /* EFER MSR writes are always intercepted. */
7962 if (idMsr != MSR_K6_EFER)
7963 {
7964 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7965 idMsr));
7966 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7967 }
7968 }
7969
7970 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7971 {
7972 Assert(pVmcsInfo->pvMsrBitmap);
7973 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7974 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7975 {
7976 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7977 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7978 }
7979 }
7980 break;
7981 }
7982 }
7983 }
7984#endif /* VBOX_STRICT */
7985 }
7986 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7987 {
7988 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7989 rcStrict = VINF_SUCCESS;
7990 }
7991 else
7992 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7993 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7994
7995 return rcStrict;
7996}
7997
7998
7999/**
8000 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8001 */
8002HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8003{
8004 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8005
8006 /** @todo The guest has likely hit a contended spinlock. We might want to
8007 * poke or schedule a different guest VCPU. */
8008 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8009 if (RT_SUCCESS(rc))
8010 return VINF_EM_RAW_INTERRUPT;
8011
8012 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8013 return rc;
8014}
8015
8016
8017/**
8018 * VM-exit handler for when the TPR value is lowered below the specified
8019 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8020 */
8021HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8022{
8023 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8024 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8025
8026 /*
8027 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8028 * We'll re-evaluate pending interrupts and inject them before the next VM
8029 * entry so we can just continue execution here.
8030 */
8031 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8032 return VINF_SUCCESS;
8033}
8034
8035
8036/**
8037 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8038 * VM-exit.
8039 *
8040 * @retval VINF_SUCCESS when guest execution can continue.
8041 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8042 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8043 * incompatible guest state for VMX execution (real-on-v86 case).
8044 */
8045HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8046{
8047 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8048 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8049
8050 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8051 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8052 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8053
8054 VBOXSTRICTRC rcStrict;
8055 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8056 uint64_t const uExitQual = pVmxTransient->uExitQual;
8057 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8058 switch (uAccessType)
8059 {
8060 /*
8061 * MOV to CRx.
8062 */
8063 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8064 {
8065 /*
8066 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8067 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8068 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8069 * PAE PDPTEs as well.
8070 */
8071 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8072 AssertRCReturn(rc, rc);
8073
8074 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8075#ifndef IN_NEM_DARWIN
8076 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8077#endif
8078 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8079 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8080
8081 /*
8082 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8083 * - When nested paging isn't used.
8084 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8085 * - We are executing in the VM debug loop.
8086 */
8087#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8088# ifndef IN_NEM_DARWIN
8089 Assert( iCrReg != 3
8090 || !VM_IS_VMX_NESTED_PAGING(pVM)
8091 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8092 || pVCpu->hmr0.s.fUsingDebugLoop);
8093# else
8094 Assert( iCrReg != 3
8095 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8096# endif
8097#endif
8098
8099 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8100 Assert( iCrReg != 8
8101 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8102
8103 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8104 AssertMsg( rcStrict == VINF_SUCCESS
8105 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8106
8107#ifndef IN_NEM_DARWIN
8108 /*
8109 * This is a kludge for handling switches back to real mode when we try to use
8110 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8111 * deal with special selector values, so we have to return to ring-3 and run
8112 * there till the selector values are V86 mode compatible.
8113 *
8114 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8115 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8116 * this function.
8117 */
8118 if ( iCrReg == 0
8119 && rcStrict == VINF_SUCCESS
8120 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8121 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8122 && (uOldCr0 & X86_CR0_PE)
8123 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8124 {
8125 /** @todo Check selectors rather than returning all the time. */
8126 Assert(!pVmxTransient->fIsNestedGuest);
8127 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8128 rcStrict = VINF_EM_RESCHEDULE_REM;
8129 }
8130#endif
8131
8132 break;
8133 }
8134
8135 /*
8136 * MOV from CRx.
8137 */
8138 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8139 {
8140 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8141 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8142
8143 /*
8144 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8145 * - When nested paging isn't used.
8146 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8147 * - We are executing in the VM debug loop.
8148 */
8149#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8150# ifndef IN_NEM_DARWIN
8151 Assert( iCrReg != 3
8152 || !VM_IS_VMX_NESTED_PAGING(pVM)
8153 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8154 || pVCpu->hmr0.s.fLeaveDone);
8155# else
8156 Assert( iCrReg != 3
8157 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8158# endif
8159#endif
8160
8161 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8162 Assert( iCrReg != 8
8163 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8164
8165 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8166 break;
8167 }
8168
8169 /*
8170 * CLTS (Clear Task-Switch Flag in CR0).
8171 */
8172 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8173 {
8174 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8175 break;
8176 }
8177
8178 /*
8179 * LMSW (Load Machine-Status Word into CR0).
8180 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8181 */
8182 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8183 {
8184 RTGCPTR GCPtrEffDst;
8185 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8186 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8187 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8188 if (fMemOperand)
8189 {
8190 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8191 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8192 }
8193 else
8194 GCPtrEffDst = NIL_RTGCPTR;
8195 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8196 break;
8197 }
8198
8199 default:
8200 {
8201 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8202 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8203 }
8204 }
8205
8206 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8207 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8208 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8209
8210 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8211 NOREF(pVM);
8212 return rcStrict;
8213}
8214
8215
8216/**
8217 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8218 * VM-exit.
8219 */
8220HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8221{
8222 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8223 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8224
8225 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8226 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8227 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8228 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8229 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8230 | CPUMCTX_EXTRN_EFER);
8231 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8232 AssertRCReturn(rc, rc);
8233
8234 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8235 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8236 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8237 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8238 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8239 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8240 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
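    /* The exit qualification encodes the I/O access size as 0, 1 or 3 (for 1, 2 and 4 byte accesses); the value 2 is not defined. */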
8241 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8242
8243 /*
8244 * Update exit history to see if this exit can be optimized.
8245 */
8246 VBOXSTRICTRC rcStrict;
8247 PCEMEXITREC pExitRec = NULL;
8248 if ( !fGstStepping
8249 && !fDbgStepping)
8250 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8251 !fIOString
8252 ? !fIOWrite
8253 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8254 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8255 : !fIOWrite
8256 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8257 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8258 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8259 if (!pExitRec)
8260 {
8261 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8262 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8263
8264 uint32_t const cbValue = s_aIOSizes[uIOSize];
8265 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8266 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8267 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8268 if (fIOString)
8269 {
8270 /*
8271 * INS/OUTS - I/O String instruction.
8272 *
8273 * Use instruction-information if available, otherwise fall back on
8274 * interpreting the instruction.
8275 */
8276 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8277 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8278 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8279 if (fInsOutsInfo)
8280 {
8281 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8282 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8283 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8284 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8285 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8286 if (fIOWrite)
8287 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8288 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8289 else
8290 {
8291 /*
8292 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8293 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8294 * See Intel Instruction spec. for "INS".
8295 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8296 */
8297 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8298 }
8299 }
8300 else
8301 rcStrict = IEMExecOne(pVCpu);
8302
8303 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8304 fUpdateRipAlready = true;
8305 }
8306 else
8307 {
8308 /*
8309 * IN/OUT - I/O instruction.
8310 */
8311 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8312 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8313 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8314 if (fIOWrite)
8315 {
8316 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8317 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8318#ifndef IN_NEM_DARWIN
8319 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8320 && !pCtx->eflags.Bits.u1TF)
8321 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8322#endif
8323 }
8324 else
8325 {
8326 uint32_t u32Result = 0;
8327 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8328 if (IOM_SUCCESS(rcStrict))
8329 {
8330 /* Save result of I/O IN instr. in AL/AX/EAX. */
8331 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8332 }
8333#ifndef IN_NEM_DARWIN
8334 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8335 && !pCtx->eflags.Bits.u1TF)
8336 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8337#endif
8338 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8339 }
8340 }
8341
8342 if (IOM_SUCCESS(rcStrict))
8343 {
8344 if (!fUpdateRipAlready)
8345 {
8346 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8348 }
8349
8350 /*
8351 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8352 * meditation while booting a Fedora 17 64-bit guest.
8353 *
8354 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8355 */
8356 if (fIOString)
8357 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8358
8359 /*
8360 * If any I/O breakpoints are armed, we need to check if one triggered
8361 * and take appropriate action.
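    /* RDPMC is handled by the EM interpreter; on success we advance RIP ourselves (the instruction is always 2 bytes long). */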
8362 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8363 */
8364 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8365 AssertRCReturn(rc, rc);
8366
8367 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8368 * execution engines about whether hyper BPs and such are pending. */
8369 uint32_t const uDr7 = pCtx->dr[7];
8370 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8371 && X86_DR7_ANY_RW_IO(uDr7)
8372 && (pCtx->cr4 & X86_CR4_DE))
8373 || DBGFBpIsHwIoArmed(pVM)))
8374 {
8375 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8376
8377#ifndef IN_NEM_DARWIN
8378 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8379 VMMRZCallRing3Disable(pVCpu);
8380 HM_DISABLE_PREEMPT(pVCpu);
8381
8382 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8383
8384 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8385 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8386 {
8387 /* Raise #DB. */
8388 if (fIsGuestDbgActive)
8389 ASMSetDR6(pCtx->dr[6]);
8390 if (pCtx->dr[7] != uDr7)
8391 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8392
8393 vmxHCSetPendingXcptDB(pVCpu);
8394 }
8395 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8396 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8397 else if ( rcStrict2 != VINF_SUCCESS
8398 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8399 rcStrict = rcStrict2;
8400 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8401
8402 HM_RESTORE_PREEMPT();
8403 VMMRZCallRing3Enable(pVCpu);
8404#else
8405 /** @todo */
8406#endif
8407 }
8408 }
8409
8410#ifdef VBOX_STRICT
8411 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8412 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8413 Assert(!fIOWrite);
8414 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8415 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8416 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8417 Assert(fIOWrite);
8418 else
8419 {
8420# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8421 * statuses that the VMM device and some others may return. See
8422 * IOM_SUCCESS() for guidance. */
8423 AssertMsg( RT_FAILURE(rcStrict)
8424 || rcStrict == VINF_SUCCESS
8425 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8426 || rcStrict == VINF_EM_DBG_BREAKPOINT
8427 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8428 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8429# endif
8430 }
8431#endif
8432 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8433 }
8434 else
8435 {
8436 /*
8437 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8438 */
8439 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8440 AssertRCReturn(rc2, rc2);
8441 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8442 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8443 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8444 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8445 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8446 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8447
8448 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8449 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8450
8451 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8452 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8453 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8454 }
8455 return rcStrict;
8456}
8457
8458
8459/**
8460 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8461 * VM-exit.
8462 */
8463HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8464{
8465 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8466
8467 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8468 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8469 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8470 {
8471 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8472 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8473 {
8474 uint32_t uErrCode;
8475 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8476 {
8477 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8478 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8479 }
8480 else
8481 uErrCode = 0;
8482
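            /* For a #PF the faulting address to re-inject comes from CR2; other event types don't carry one. */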
8483 RTGCUINTPTR GCPtrFaultAddress;
8484 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8485 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8486 else
8487 GCPtrFaultAddress = 0;
8488
8489 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8490
8491 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8492 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8493
8494 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8495 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8496 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8497 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8498 }
8499 }
8500
8501 /* Fall back to the interpreter to emulate the task-switch. */
8502 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8503 return VERR_EM_INTERPRETER;
8504}
8505
8506
8507/**
8508 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8509 */
8510HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8511{
8512 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8513
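    /* The monitor-trap flag is typically armed for guest single-stepping; disarm it and report the completed step to the debugger. */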
8514 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8515 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8516 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8517 AssertRC(rc);
8518 return VINF_EM_DBG_STEPPED;
8519}
8520
8521
8522/**
8523 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8524 */
8525HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8526{
8527 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8528 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8529
8530 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8531 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8532 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8533 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8534 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8535
8536 /*
8537 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8538 */
8539 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8540 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8541 {
8542 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8543 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8544 {
8545 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8546 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8547 }
8548 }
8549 else
8550 {
8551 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8552 return rcStrict;
8553 }
8554
8555 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8556 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8557 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8558 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8559 AssertRCReturn(rc, rc);
8560
8561    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8562 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8563 switch (uAccessType)
8564 {
8565#ifndef IN_NEM_DARWIN
8566 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8567 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8568 {
8569 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8570 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8571 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8572
8573 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8574 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8575 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
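            /* For instance, with the usual APIC base of 0xfee00000 and an access at page offset 0x300
               (the xAPIC ICR low register), GCPhys would come out as 0xfee00300 for the MMIO handler below. */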
8576 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8577 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8578
8579 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8580 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8581 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8582 if ( rcStrict == VINF_SUCCESS
8583 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8584 || rcStrict == VERR_PAGE_NOT_PRESENT)
8585 {
8586 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8587 | HM_CHANGED_GUEST_APIC_TPR);
8588 rcStrict = VINF_SUCCESS;
8589 }
8590 break;
8591 }
8592#else
8593 /** @todo */
8594#endif
8595
8596 default:
8597 {
8598 Log4Func(("uAccessType=%#x\n", uAccessType));
8599 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8600 break;
8601 }
8602 }
8603
8604 if (rcStrict != VINF_SUCCESS)
8605 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8606 return rcStrict;
8607}
8608
8609
8610/**
8611 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8612 * VM-exit.
8613 */
8614HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8615{
8616 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8617 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8618
8619 /*
8620 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8621 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8622 * must emulate the MOV DRx access.
8623 */
8624 if (!pVmxTransient->fIsNestedGuest)
8625 {
8626 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8627 if (pVmxTransient->fWasGuestDebugStateActive)
8628 {
8629 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8630 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8631 }
8632
8633 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8634 && !pVmxTransient->fWasHyperDebugStateActive)
8635 {
8636 Assert(!DBGFIsStepping(pVCpu));
8637 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8638
8639 /* Don't intercept MOV DRx any more. */
8640 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8641 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8642 AssertRC(rc);
8643
8644#ifndef IN_NEM_DARWIN
8645 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8646 VMMRZCallRing3Disable(pVCpu);
8647 HM_DISABLE_PREEMPT(pVCpu);
8648
8649 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8650 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8651 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8652
8653 HM_RESTORE_PREEMPT();
8654 VMMRZCallRing3Enable(pVCpu);
8655#else
8656 CPUMR3NemActivateGuestDebugState(pVCpu);
8657 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8658 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8659#endif
8660
8661#ifdef VBOX_WITH_STATISTICS
8662 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8663 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8664 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8665 else
8666 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8667#endif
8668 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8669 return VINF_SUCCESS;
8670 }
8671 }
8672
8673 /*
8674     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8675 * The EFER MSR is always up-to-date.
8676 * Update the segment registers and DR7 from the CPU.
8677 */
8678 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8679 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8680 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8681 AssertRCReturn(rc, rc);
8682 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
8683
8684 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8685 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8686 {
8687 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8688 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8689 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8690 if (RT_SUCCESS(rc))
8691 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8692 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8693 }
8694 else
8695 {
8696 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8697 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8698 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8699 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8700 }
8701
8702 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8703 if (RT_SUCCESS(rc))
8704 {
8705 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8706 AssertRCReturn(rc2, rc2);
8707 return VINF_SUCCESS;
8708 }
8709 return rc;
8710}
8711
8712
8713/**
8714 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8715 * Conditional VM-exit.
8716 */
8717HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8718{
8719 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8720
8721#ifndef IN_NEM_DARWIN
8722 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8723
8724 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8725 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8726 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8727 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8728 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8729
8730 /*
8731 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8732 */
8733 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8734 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8735 {
8736 /*
8737 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8738 * instruction emulation to inject the original event. Otherwise, injecting the original event
8739 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8740 */
8741 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8742 { /* likely */ }
8743 else
8744 {
8745 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8746#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8747 /** @todo NSTVMX: Think about how this should be handled. */
8748 if (pVmxTransient->fIsNestedGuest)
8749 return VERR_VMX_IPE_3;
8750#endif
8751 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8752 }
8753 }
8754 else
8755 {
8756 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8757 return rcStrict;
8758 }
8759
8760 /*
8761 * Get sufficient state and update the exit history entry.
8762 */
8763 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8764 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8765 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8766 AssertRCReturn(rc, rc);
8767
8768 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8769 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8770 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8771 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8772 if (!pExitRec)
8773 {
8774 /*
8775 * If we succeed, resume guest execution.
8776 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8777 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8778 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8779 * weird case. See @bugref{6043}.
8780 */
8781 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8782 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8783/** @todo bird: We can probably just go straight to IOM here and assume that
8784 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8785 * well. However, we need to address that aliasing workarounds that
8786 * well. However, we need to address the aliasing workarounds that
8787 *
8788 * Might also be interesting to see if we can get this done more or
8789 * less locklessly inside IOM. Need to consider the lookup table
8790 * updating and use a bit more carefully first (or do all updates via
8791 * rendezvous) */
8792 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8793 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8794 if ( rcStrict == VINF_SUCCESS
8795 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8796 || rcStrict == VERR_PAGE_NOT_PRESENT)
8797 {
8798 /* Successfully handled MMIO operation. */
8799 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8800 | HM_CHANGED_GUEST_APIC_TPR);
8801 rcStrict = VINF_SUCCESS;
8802 }
8803 }
8804 else
8805 {
8806 /*
8807 * Frequent exit or something needing probing. Call EMHistoryExec.
8808 */
8809 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8810 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8811
8812 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8814
8815 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8816 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8817 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8818 }
8819 return rcStrict;
8820#else
8821 AssertFailed();
8822 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8823#endif
8824}
8825
8826
8827/**
8828 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8829 * VM-exit.
8830 */
8831HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8832{
8833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8834#ifndef IN_NEM_DARWIN
8835 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8836
8837 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8838 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8839 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8840 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8841 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8842 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8843
8844 /*
8845 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8846 */
8847 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8848 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8849 {
8850 /*
8851 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8852 * we shall resolve the nested #PF and re-inject the original event.
8853 */
8854 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8855 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8856 }
8857 else
8858 {
8859 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8860 return rcStrict;
8861 }
8862
8863 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8864 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8865 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8866 AssertRCReturn(rc, rc);
8867
8868 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8869 uint64_t const uExitQual = pVmxTransient->uExitQual;
8870 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8871
8872 RTGCUINT uErrorCode = 0;
8873 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8874 uErrorCode |= X86_TRAP_PF_ID;
8875 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8876 uErrorCode |= X86_TRAP_PF_RW;
8877 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8878 uErrorCode |= X86_TRAP_PF_P;
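    /* Worked example: a guest write to a page whose EPT entry only permits reads sets
       VMX_EXIT_QUAL_EPT_ACCESS_WRITE and VMX_EXIT_QUAL_EPT_ENTRY_READ in the qualification,
       which translates to uErrorCode = X86_TRAP_PF_P | X86_TRAP_PF_RW for the nested #PF below. */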
8879
8880 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8881 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8882
8883 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8884
8885 /*
8886 * Handle the pagefault trap for the nested shadow table.
8887 */
8888 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8889 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8890 TRPMResetTrap(pVCpu);
8891
8892 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8893 if ( rcStrict == VINF_SUCCESS
8894 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8895 || rcStrict == VERR_PAGE_NOT_PRESENT)
8896 {
8897 /* Successfully synced our nested page tables. */
8898 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8899 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8900 return VINF_SUCCESS;
8901 }
8902#else
8903 PVM pVM = pVCpu->CTX_SUFF(pVM);
8904 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8905 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8906 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8907 vmxHCImportGuestRip(pVCpu);
8908 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8909
8910 /*
8911 * Ask PGM for information about the given GCPhys. We need to check if we're
8912 * out of sync first.
8913 */
8914 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8915 PGMPHYSNEMPAGEINFO Info;
8916 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8917 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8918 if (RT_SUCCESS(rc))
8919 {
8920 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8921 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8922 {
8923 if (State.fCanResume)
8924 {
8925 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8926 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8927 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8928 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8929 State.fDidSomething ? "" : " no-change"));
8930 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8931 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8932 return VINF_SUCCESS;
8933 }
8934 }
8935
8936 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8937 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8938 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8939 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8940 State.fDidSomething ? "" : " no-change"));
8941 }
8942 else
8943 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8944 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8945 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8946
8947 /*
8948 * Emulate the memory access, either access handler or special memory.
8949 */
8950 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8951 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8952 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8953 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8954 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8955
8956 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8957 AssertRCReturn(rc, rc);
8958
8959 VBOXSTRICTRC rcStrict;
8960 if (!pExitRec)
8961 rcStrict = IEMExecOne(pVCpu);
8962 else
8963 {
8964 /* Frequent access or probing. */
8965 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8966 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8967 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8968 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8969 }
8970
8971 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8972#endif
8973
8974    Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8975 return rcStrict;
8976}
8977
8978
8979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8980/**
8981 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8982 */
8983HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8984{
8985 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8986
8987 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8988 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8989 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8990 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8991 | CPUMCTX_EXTRN_HWVIRT
8992 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8993 AssertRCReturn(rc, rc);
8994
8995 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8996
8997 VMXVEXITINFO ExitInfo;
8998 RT_ZERO(ExitInfo);
8999 ExitInfo.uReason = pVmxTransient->uExitReason;
9000 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9001 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9002 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9003 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
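    /* The decoder combines the segment/base/index/scale encoded in the instruction info with the
       displacement held in the exit qualification, so e.g. "vmclear [rbp+30h]" yields the guest
       linear address of that memory operand in ExitInfo.GCPtrEffAddr. */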
9004
9005 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9006 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9007 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9008 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9009 {
9010 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9011 rcStrict = VINF_SUCCESS;
9012 }
9013 return rcStrict;
9014}
9015
9016
9017/**
9018 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9019 */
9020HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9021{
9022 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9023
9024    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMLAUNCH;
9025 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9026 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9027 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9028 AssertRCReturn(rc, rc);
9029
9030 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9031
9032 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9033 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9034 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9035 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9036 {
9037 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9038 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9039 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9040 }
9041 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9042 return rcStrict;
9043}
9044
9045
9046/**
9047 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9048 */
9049HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9050{
9051 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9052
9053 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9054 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9055 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9056 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9057 | CPUMCTX_EXTRN_HWVIRT
9058 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9059 AssertRCReturn(rc, rc);
9060
9061 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9062
9063 VMXVEXITINFO ExitInfo;
9064 RT_ZERO(ExitInfo);
9065 ExitInfo.uReason = pVmxTransient->uExitReason;
9066 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9067 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9068 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9069 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9070
9071 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9072 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9073 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9074 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9075 {
9076 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9077 rcStrict = VINF_SUCCESS;
9078 }
9079 return rcStrict;
9080}
9081
9082
9083/**
9084 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9085 */
9086HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9087{
9088 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9089
9090 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9091 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9092 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9093 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9094 | CPUMCTX_EXTRN_HWVIRT
9095 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9096 AssertRCReturn(rc, rc);
9097
9098 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9099
9100 VMXVEXITINFO ExitInfo;
9101 RT_ZERO(ExitInfo);
9102 ExitInfo.uReason = pVmxTransient->uExitReason;
9103 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9104 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9105 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9106 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9107
9108 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9109 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9110 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9111 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9112 {
9113 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9114 rcStrict = VINF_SUCCESS;
9115 }
9116 return rcStrict;
9117}
9118
9119
9120/**
9121 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9122 */
9123HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9124{
9125 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9126
9127 /*
9128 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9129     * thus might not need to import the shadow VMCS state, but it's safer to do so in case
9130 * code elsewhere dares look at unsynced VMCS fields.
9131 */
9132 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9133 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9134 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9135 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9136 | CPUMCTX_EXTRN_HWVIRT
9137 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9138 AssertRCReturn(rc, rc);
9139
9140 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9141
9142 VMXVEXITINFO ExitInfo;
9143 RT_ZERO(ExitInfo);
9144 ExitInfo.uReason = pVmxTransient->uExitReason;
9145 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9146 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9147 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9148 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9149 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9150
9151 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9152 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9153 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9154 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9155 {
9156 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9157 rcStrict = VINF_SUCCESS;
9158 }
9159 return rcStrict;
9160}
9161
9162
9163/**
9164 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9165 */
9166HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9167{
9168 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9169
9170    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9171 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9172 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9173 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9174 AssertRCReturn(rc, rc);
9175
9176 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9177
9178 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9179 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9180 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9182 {
9183 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9184 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9185 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9186 }
9187 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9188 return rcStrict;
9189}
9190
9191
9192/**
9193 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9194 */
9195HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9196{
9197 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9198
9199 /*
9200     * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9201     * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9202     * re-loading the entire shadow VMCS, so we should save the entire shadow VMCS here.
9203 */
9204 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9205 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9206 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9207 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9208 | CPUMCTX_EXTRN_HWVIRT
9209 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9210 AssertRCReturn(rc, rc);
9211
9212 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9213
9214 VMXVEXITINFO ExitInfo;
9215 RT_ZERO(ExitInfo);
9216 ExitInfo.uReason = pVmxTransient->uExitReason;
9217 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9218 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9219 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9220 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9221 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9222
9223 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9224 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9225 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9226 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9227 {
9228 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9229 rcStrict = VINF_SUCCESS;
9230 }
9231 return rcStrict;
9232}
9233
9234
9235/**
9236 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9237 */
9238HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9239{
9240 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9241
9242 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9243 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9244 | CPUMCTX_EXTRN_HWVIRT
9245 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9246 AssertRCReturn(rc, rc);
9247
9248 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9249
9250 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9251 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9252 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9253 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9254 {
9255 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9256 rcStrict = VINF_SUCCESS;
9257 }
9258 return rcStrict;
9259}
9260
9261
9262/**
9263 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9264 */
9265HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9266{
9267 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9268
9269 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9270 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9271 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9272 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9273 | CPUMCTX_EXTRN_HWVIRT
9274 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9275 AssertRCReturn(rc, rc);
9276
9277 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9278
9279 VMXVEXITINFO ExitInfo;
9280 RT_ZERO(ExitInfo);
9281 ExitInfo.uReason = pVmxTransient->uExitReason;
9282 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9283 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9284 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9285 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9286
9287 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9288 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9289 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9290 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9291 {
9292 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9293 rcStrict = VINF_SUCCESS;
9294 }
9295 return rcStrict;
9296}
9297
9298
9299/**
9300 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9301 */
9302HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9303{
9304 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9305
9306 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9307 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9308 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9309 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9310 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9311 AssertRCReturn(rc, rc);
9312
9313 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9314
9315 VMXVEXITINFO ExitInfo;
9316 RT_ZERO(ExitInfo);
9317 ExitInfo.uReason = pVmxTransient->uExitReason;
9318 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9319 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9320 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9321 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9322
9323 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9324 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9325 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9326 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9327 {
9328 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9329 rcStrict = VINF_SUCCESS;
9330 }
9331 return rcStrict;
9332}
9333
9334
9335# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9336/**
9337 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9338 */
9339HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9340{
9341 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9342
9343 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9344 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9345 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9346 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9347 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9348 AssertRCReturn(rc, rc);
9349
9350 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9351
9352 VMXVEXITINFO ExitInfo;
9353 RT_ZERO(ExitInfo);
9354 ExitInfo.uReason = pVmxTransient->uExitReason;
9355 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9356 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9357 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9358 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9359
9360 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9361 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9362 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9363 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9364 {
9365 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9366 rcStrict = VINF_SUCCESS;
9367 }
9368 return rcStrict;
9369}
9370# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9371#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9372/** @} */
9373
9374
9375#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9376/** @name Nested-guest VM-exit handlers.
9377 * @{
9378 */
9379/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9380/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9381/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9382
9383/**
9384 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9385 * Conditional VM-exit.
9386 */
9387HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9388{
9389 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9390
9391 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9392
9393 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9394 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9395 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9396
9397 switch (uExitIntType)
9398 {
9399#ifndef IN_NEM_DARWIN
9400 /*
9401 * Physical NMIs:
9402         *    We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9403 */
9404 case VMX_EXIT_INT_INFO_TYPE_NMI:
9405 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9406#endif
9407
9408 /*
9409 * Hardware exceptions,
9410 * Software exceptions,
9411 * Privileged software exceptions:
9412 * Figure out if the exception must be delivered to the guest or the nested-guest.
9413 */
9414 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9415 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9416 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9417 {
9418 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9419 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9420 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9421 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9422
9423 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9424 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9425 pVmxTransient->uExitIntErrorCode);
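            /* For instance, whether a #PF is intercepted depends on bit 14 of the nested-guest
               exception bitmap combined with the PFEC_MASK/PFEC_MATCH filtering of the error code;
               if intercepted, the exception is reflected to the nested hypervisor as a VM-exit below. */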
9426 if (fIntercept)
9427 {
9428 /* Exit qualification is required for debug and page-fault exceptions. */
9429 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9430
9431 /*
9432 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9433 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9434 * length. However, if delivery of a software interrupt, software exception or privileged
9435 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9436 */
9437 VMXVEXITINFO ExitInfo;
9438 RT_ZERO(ExitInfo);
9439 ExitInfo.uReason = pVmxTransient->uExitReason;
9440 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9441 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9442
9443 VMXVEXITEVENTINFO ExitEventInfo;
9444 RT_ZERO(ExitEventInfo);
9445 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9446 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9447 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9448 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9449
9450#ifdef DEBUG_ramshankar
9451 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9452 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9453 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9454 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9455 {
9456 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9457 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9458 }
9459#endif
9460 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9461 }
9462
9463 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9464 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9465 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9466 }
9467
9468 /*
9469 * Software interrupts:
9470 * VM-exits cannot be caused by software interrupts.
9471 *
9472 * External interrupts:
9473 * This should only happen when "acknowledge external interrupts on VM-exit"
9474 * control is set. However, we never set this when executing a guest or
9475 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9476 * the guest.
9477 */
9478 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9479 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9480 default:
9481 {
9482 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9483 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9484 }
9485 }
9486}
9487
9488
9489/**
9490 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9491 * Unconditional VM-exit.
9492 */
9493HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9494{
9495 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9496 return IEMExecVmxVmexitTripleFault(pVCpu);
9497}
9498
9499
9500/**
9501 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9502 */
9503HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9504{
9505 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9506
9507 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9508 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9509 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9510}
9511
9512
9513/**
9514 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9515 */
9516HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9517{
9518 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9519
9520 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9521 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9522 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9523}
9524
9525
9526/**
9527 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9528 * Unconditional VM-exit.
9529 */
9530HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9531{
9532 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9533
9534 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9535 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9536 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9537 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9538
9539 VMXVEXITINFO ExitInfo;
9540 RT_ZERO(ExitInfo);
9541 ExitInfo.uReason = pVmxTransient->uExitReason;
9542 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9543 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9544
9545 VMXVEXITEVENTINFO ExitEventInfo;
9546 RT_ZERO(ExitEventInfo);
9547 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9548 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9549 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9550}
9551
9552
9553/**
9554 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9555 */
9556HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9557{
9558 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9559
9560 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9561 {
9562 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9563 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9564 }
9565 return vmxHCExitHlt(pVCpu, pVmxTransient);
9566}
9567
9568
9569/**
9570 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9571 */
9572HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9573{
9574 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9575
9576 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9577 {
9578 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9579 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9580
9581 VMXVEXITINFO ExitInfo;
9582 RT_ZERO(ExitInfo);
9583 ExitInfo.uReason = pVmxTransient->uExitReason;
9584 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9585 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9586 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9587 }
9588 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9589}
9590
9591
9592/**
9593 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9594 */
9595HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9596{
9597 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9598
9599 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9600 {
9601 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9602 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9603 }
9604 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9605}
9606
9607
9608/**
9609 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9610 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9611 */
9612HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9613{
9614 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9615
9616 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9617 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9618
9619 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9620
9621 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9622 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9623 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9624
9625 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9626 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9627 u64VmcsField &= UINT64_C(0xffffffff);
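    /* Outside long mode the VMCS field encoding comes from a 32-bit register, so only the low half
       is meaningful; e.g. a VMREAD of the guest-RIP field would use the 32-bit encoding 0x681e. */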
9628
9629 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9630 {
9631 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9632 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9633
9634 VMXVEXITINFO ExitInfo;
9635 RT_ZERO(ExitInfo);
9636 ExitInfo.uReason = pVmxTransient->uExitReason;
9637 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9638 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9639 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9640 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9641 }
9642
9643 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9644 return vmxHCExitVmread(pVCpu, pVmxTransient);
9645 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9646}
9647
9648
9649/**
9650 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9651 */
9652HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9653{
9654 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9655
9656 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9657 {
9658 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9659 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9660 }
9661
9662 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9663}
9664
9665
9666/**
9667 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9668 * Conditional VM-exit.
9669 */
9670HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9671{
9672 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9673
9674 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9675 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9676
9677 VBOXSTRICTRC rcStrict;
9678 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9679 switch (uAccessType)
9680 {
9681 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9682 {
9683 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9684 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9685 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9686 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9687
9688 bool fIntercept;
9689 switch (iCrReg)
9690 {
9691 case 0:
9692 case 4:
9693 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9694 break;
9695
9696 case 3:
9697 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9698 break;
9699
9700 case 8:
9701 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9702 break;
9703
9704 default:
9705 fIntercept = false;
9706 break;
9707 }
9708 if (fIntercept)
9709 {
9710 VMXVEXITINFO ExitInfo;
9711 RT_ZERO(ExitInfo);
9712 ExitInfo.uReason = pVmxTransient->uExitReason;
9713 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9714 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9715 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9716 }
9717 else
9718 {
9719 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9720 AssertRCReturn(rc, rc);
9721 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9722 }
9723 break;
9724 }
9725
9726 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9727 {
9728 /*
9729 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9730 * CR2 reads do not cause a VM-exit.
9731 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9732 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9733 */
9734 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9735 if ( iCrReg == 3
9736 || iCrReg == 8)
9737 {
9738 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9739 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
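                /* The table is indexed by the CR number; only indices 3 and 8 are reachable here, mapping
                   CR3 reads to the "CR3-store exiting" control and CR8 reads to "CR8-store exiting". */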
9740 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9741 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9742 {
9743 VMXVEXITINFO ExitInfo;
9744 RT_ZERO(ExitInfo);
9745 ExitInfo.uReason = pVmxTransient->uExitReason;
9746 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9747 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9748 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9749 }
9750 else
9751 {
9752 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9753 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9754 }
9755 }
9756 else
9757 {
9758 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9759 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9760 }
9761 break;
9762 }
9763
9764 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9765 {
9766 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9767 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9768 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
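            /* Per the CLTS intercept rule, a nested VM-exit is only due when the nested hypervisor owns
               CR0.TS (guest/host mask bit set) and the CR0 read shadow also has TS set. */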
9769 if ( (uGstHostMask & X86_CR0_TS)
9770 && (uReadShadow & X86_CR0_TS))
9771 {
9772 VMXVEXITINFO ExitInfo;
9773 RT_ZERO(ExitInfo);
9774 ExitInfo.uReason = pVmxTransient->uExitReason;
9775 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9776 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9777 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9778 }
9779 else
9780 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9781 break;
9782 }
9783
9784 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9785 {
9786 RTGCPTR GCPtrEffDst;
9787 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9788 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9789 if (fMemOperand)
9790 {
9791 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9792 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9793 }
9794 else
9795 GCPtrEffDst = NIL_RTGCPTR;
9796
9797 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9798 {
9799 VMXVEXITINFO ExitInfo;
9800 RT_ZERO(ExitInfo);
9801 ExitInfo.uReason = pVmxTransient->uExitReason;
9802 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9803 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9804 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9805 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9806 }
9807 else
9808 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9809 break;
9810 }
9811
9812 default:
9813 {
9814 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9815 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9816 }
9817 }
9818
9819 if (rcStrict == VINF_IEM_RAISED_XCPT)
9820 {
9821 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9822 rcStrict = VINF_SUCCESS;
9823 }
9824 return rcStrict;
9825}
9826
9827
9828/**
9829 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9830 * Conditional VM-exit.
9831 */
9832HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9833{
9834 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9835
9836 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9837 {
9838 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9839 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9840
9841 VMXVEXITINFO ExitInfo;
9842 RT_ZERO(ExitInfo);
9843 ExitInfo.uReason = pVmxTransient->uExitReason;
9844 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9845 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9846 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9847 }
9848 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9849}
9850
9851
9852/**
9853 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9854 * Conditional VM-exit.
9855 */
9856HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9857{
9858 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9859
9860 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9861
9862 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9863 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9864 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9865
9866 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9867 uint8_t const cbAccess = s_aIOSizes[uIOSize];
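    /* The exit qualification encodes the access width as 0 (byte), 1 (word) or 3 (dword);
       e.g. an "out dx, eax" gives uIOSize 3 and hence cbAccess 4 for the I/O intercept check below. */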
9868 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9869 {
9870 /*
9871 * IN/OUT instruction:
9872 * - Provides VM-exit instruction length.
9873 *
9874 * INS/OUTS instruction:
9875 * - Provides VM-exit instruction length.
9876 * - Provides Guest-linear address.
9877 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9878 */
9879 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9880 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9881
9882        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9883 pVmxTransient->ExitInstrInfo.u = 0;
9884 pVmxTransient->uGuestLinearAddr = 0;
9885
9886 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9887 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9888 if (fIOString)
9889 {
9890 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9891 if (fVmxInsOutsInfo)
9892 {
9893 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9894 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9895 }
9896 }
9897
9898 VMXVEXITINFO ExitInfo;
9899 RT_ZERO(ExitInfo);
9900 ExitInfo.uReason = pVmxTransient->uExitReason;
9901 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9902 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9903 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9904 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9905 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9906 }
9907 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9908}
9909
9910
9911/**
9912 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9913 */
9914HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9915{
9916 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9917
9918 uint32_t fMsrpm;
9919 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9920 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9921 else
9922 fMsrpm = VMXMSRPM_EXIT_RD;
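        /* Without MSR bitmaps every RDMSR in VMX non-root operation causes a VM-exit, hence the
           unconditional VMXMSRPM_EXIT_RD. */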
9923
9924 if (fMsrpm & VMXMSRPM_EXIT_RD)
9925 {
9926 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9927 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9928 }
9929 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9930}
9931
9932
9933/**
9934 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9935 */
9936HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9937{
9938 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9939
9940 uint32_t fMsrpm;
9941 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9942 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9943 else
9944 fMsrpm = VMXMSRPM_EXIT_WR;
9945
9946 if (fMsrpm & VMXMSRPM_EXIT_WR)
9947 {
9948 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9949 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9950 }
9951 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9952}
9953
9954
9955/**
9956 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9957 */
9958HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9959{
9960 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9961
9962 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9963 {
9964 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9965 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9966 }
9967 return vmxHCExitMwait(pVCpu, pVmxTransient);
9968}
9969
9970
9971/**
9972 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9973 * VM-exit.
9974 */
9975HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9976{
9977 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9978
9979 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9980 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9981 VMXVEXITINFO ExitInfo;
9982 RT_ZERO(ExitInfo);
9983 ExitInfo.uReason = pVmxTransient->uExitReason;
9984 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9985 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9986}
9987
9988
9989/**
9990 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9991 */
9992HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9993{
9994 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9995
9996 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9997 {
9998 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9999 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10000 }
10001 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10002}
10003
10004
10005/**
10006 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10007 */
10008HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10009{
10010 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10011
10012 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10013 * PAUSE when executing a nested-guest? If it does not, we would not need
10014 * to check for the intercepts here. Just call VM-exit... */
10015
10016 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10017 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10018 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10019 {
10020 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10021 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10022 }
10023 return vmxHCExitPause(pVCpu, pVmxTransient);
10024}
10025
10026
10027/**
10028 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10029 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10030 */
10031HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10032{
10033 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10034
10035 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10036 {
10037 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
10038 VMXVEXITINFO ExitInfo;
10039 RT_ZERO(ExitInfo);
10040 ExitInfo.uReason = pVmxTransient->uExitReason;
10041 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
10042 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10043 }
10044 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10045}
10046
10047
10048/**
10049 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10050 * VM-exit.
10051 */
10052HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10053{
10054 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10055
10056 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10057 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10058 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10059 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10060
10061 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10062
10063 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10064 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10065
10066 VMXVEXITINFO ExitInfo;
10067 RT_ZERO(ExitInfo);
10068 ExitInfo.uReason = pVmxTransient->uExitReason;
10069 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10070 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10071
10072 VMXVEXITEVENTINFO ExitEventInfo;
10073 RT_ZERO(ExitEventInfo);
10074 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10075 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10076 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10077}
10078
10079
10080/**
10081 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10082 * Conditional VM-exit.
10083 */
10084HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10085{
10086 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10087
10088 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10089 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10090 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10091}
10092
10093
10094/**
10095 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10096 * Conditional VM-exit.
10097 */
10098HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10099{
10100 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10101
10102 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10103 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10104 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10105}
10106
10107
10108/**
10109 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10110 */
10111HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10112{
10113 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10114
10115 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10116 {
10117 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10118 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10119 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10120 }
10121 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10122}
10123
10124
10125/**
10126 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10127 */
10128HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10129{
10130 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10131
10132 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10133 {
10134 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10135 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10136 }
10137 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10138}
10139
10140
10141/**
10142 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10143 */
10144HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10145{
10146 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10147
10148 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10149 {
10150 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10151 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10152 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10153 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10154
10155 VMXVEXITINFO ExitInfo;
10156 RT_ZERO(ExitInfo);
10157 ExitInfo.uReason = pVmxTransient->uExitReason;
10158 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10159 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10160 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10161 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10162 }
10163 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10164}
10165
10166
10167/**
10168 * Nested-guest VM-exit handler for invalid-guest state
10169 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10170 */
10171HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10172{
10173 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10174
10175 /*
10176 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10177      * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10178      * Handle it as if the outer guest itself were in an invalid guest state.
10179 *
10180 * When the fast path is implemented, this should be changed to cause the corresponding
10181 * nested-guest VM-exit.
10182 */
10183 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10184}
10185
10186
10187/**
10188 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10189 * and only provide the instruction length.
10190 *
10191 * Unconditional VM-exit.
10192 */
10193HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10194{
10195 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10196
10197#ifdef VBOX_STRICT
10198 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10199 switch (pVmxTransient->uExitReason)
10200 {
10201 case VMX_EXIT_ENCLS:
10202 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10203 break;
10204
10205 case VMX_EXIT_VMFUNC:
10206 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10207 break;
10208 }
10209#endif
10210
10211 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10212 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10213}
10214
10215
10216/**
10217 * Nested-guest VM-exit handler for instructions that provide instruction length as
10218 * well as more information.
10219 *
10220 * Unconditional VM-exit.
10221 */
10222HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10223{
10224 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10225
10226# ifdef VBOX_STRICT
10227 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10228 switch (pVmxTransient->uExitReason)
10229 {
10230 case VMX_EXIT_GDTR_IDTR_ACCESS:
10231 case VMX_EXIT_LDTR_TR_ACCESS:
10232 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10233 break;
10234
10235 case VMX_EXIT_RDRAND:
10236 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10237 break;
10238
10239 case VMX_EXIT_RDSEED:
10240 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10241 break;
10242
10243 case VMX_EXIT_XSAVES:
10244 case VMX_EXIT_XRSTORS:
10245 /** @todo NSTVMX: Verify XSS-bitmap. */
10246 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10247 break;
10248
10249 case VMX_EXIT_UMWAIT:
10250 case VMX_EXIT_TPAUSE:
10251 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10252 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10253 break;
10254
10255 case VMX_EXIT_LOADIWKEY:
10256 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10257 break;
10258 }
10259# endif
10260
10261 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10262 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10263 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10264
10265 VMXVEXITINFO ExitInfo;
10266 RT_ZERO(ExitInfo);
10267 ExitInfo.uReason = pVmxTransient->uExitReason;
10268 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10269 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10270 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10271 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10272}
10273
10274# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10275
10276/**
10277 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10278 * Conditional VM-exit.
10279 */
10280HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10281{
10282 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10283 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10284
10285 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10286 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10287 {
10288 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10289 AssertRCReturn(rc, rc);
10290
10291 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10292 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10293
10294 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10295 uint64_t const uExitQual = pVmxTransient->uExitQual;
10296
10297 RTGCPTR GCPtrNestedFault;
10298 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10299 if (fIsLinearAddrValid)
10300 {
10301 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10302 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10303 }
10304 else
10305 GCPtrNestedFault = 0;
10306
10307 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10308 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10309 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10310 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10311 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
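        /* Worked example (added, illustrative): a guest data write hitting an EPT entry
           that is readable but not writable sets the ACCESS_WRITE and ENTRY_READ bits in
           the qualification, so this yields uErr = X86_TRAP_PF_RW | X86_TRAP_PF_P. */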
10312
10313 PGMPTWALK Walk;
10314 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10315 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10316 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10317 &Walk);
10318 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10319 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10320 if (RT_SUCCESS(rcStrict))
10321 {
10322#if 1
10323 /*
10324          * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10325          * might have triggered this VM-exit.  If we forward the problem to the inner VMM,
10326          * it's the inner VMM's problem to deal with.  This means it's troublesome to
10327          * call vmxHCCheckExitDueToEventDelivery before PGMR0NestedTrap0eHandlerNestedPaging
10328          * has decided whose VM-exit it is.  Unfortunately, we're in a bit of a pickle then if
10329          * we end up with an informational status here, as we _must_ _not_ drop events either.
10330          */
10331         /** @todo Need a better solution for this; it should probably be applied to
10332          *        other exits too... */
10333 if (rcStrict == VINF_SUCCESS)
10334 {
10335 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
10336 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10337 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10338 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10339 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10340
10341 vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10342 }
10343#endif
10344 return rcStrict;
10345 }
10346
10347 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10348 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10349 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10350
10351 VMXVEXITEVENTINFO ExitEventInfo;
10352 RT_ZERO(ExitEventInfo);
10353 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10354 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10355
10356 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10357 {
10358 VMXVEXITINFO ExitInfo;
10359 RT_ZERO(ExitInfo);
10360 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10361 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10362 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10363 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10364 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10365 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10366 }
10367
10368 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10369 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10370 }
10371
10372 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10373}
10374
10375
10376/**
10377 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10378 * Conditional VM-exit.
10379 */
10380HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10381{
10382 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10383 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10384
10385 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10386 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10387 {
10388 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_ALL);
10389 AssertRCReturn(rc, rc);
10390
10391 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10392
10393 PGMPTWALK Walk;
10394 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10395 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10396 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10397 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10398 0 /* GCPtrNestedFault */, &Walk);
10399 if (RT_SUCCESS(rcStrict))
10400 {
10401 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10402 return rcStrict;
10403 }
10404
10405 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10406 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10407 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10408
10409 VMXVEXITEVENTINFO ExitEventInfo;
10410 RT_ZERO(ExitEventInfo);
10411 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10412 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10413
10414 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10415 }
10416
10417 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10418}
10419
10420# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10421
10422/** @} */
10423#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10424
10425
10426/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10427 * probes.
10428 *
10429  * The following few functions and the associated structure contain the bloat
10430  * necessary for providing detailed debug events and dtrace probes as well as
10431  * reliable host-side single stepping.  This works on the principle of
10432 * "subclassing" the normal execution loop and workers. We replace the loop
10433 * method completely and override selected helpers to add necessary adjustments
10434 * to their core operation.
10435 *
10436 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10437 * any performance for debug and analysis features.
10438 *
10439 * @{
10440 */
10441
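/*
 * Illustrative sketch (added, not part of the original template): roughly how the pieces
 * below fit together.  The real debug run loop lives elsewhere in this file and differs
 * in detail; the locals and the stepping check shown here are simplified assumptions.
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState); // just before VM-entry
 *         // ... execute the guest and fetch uExitReason ...
 *         if (ASMBitTest(DbgState.bmExitsToCheck, uExitReason))
 *             rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
 *         if (   rcStrict != VINF_SUCCESS
 *             || pVCpu->cpum.GstCtx.rip != DbgState.uRipStart      // stepped?
 *             || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
 *             break;
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */
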
10442/**
10443  * Transient per-VCPU debug state of the VMCS and related info that we
10444  * save/restore across the debug run loop.
10445 */
10446typedef struct VMXRUNDBGSTATE
10447{
10448 /** The RIP we started executing at. This is for detecting that we stepped. */
10449 uint64_t uRipStart;
10450 /** The CS we started executing with. */
10451 uint16_t uCsStart;
10452
10453 /** Whether we've actually modified the 1st execution control field. */
10454 bool fModifiedProcCtls : 1;
10455 /** Whether we've actually modified the 2nd execution control field. */
10456 bool fModifiedProcCtls2 : 1;
10457 /** Whether we've actually modified the exception bitmap. */
10458 bool fModifiedXcptBitmap : 1;
10459
10460     /** We desire the CR0 mask to be cleared. */
10461 bool fClearCr0Mask : 1;
10462     /** We desire the CR4 mask to be cleared. */
10463 bool fClearCr4Mask : 1;
10464 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10465 uint32_t fCpe1Extra;
10466 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10467 uint32_t fCpe1Unwanted;
10468 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10469 uint32_t fCpe2Extra;
10470 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10471 uint32_t bmXcptExtra;
10472 /** The sequence number of the Dtrace provider settings the state was
10473 * configured against. */
10474 uint32_t uDtraceSettingsSeqNo;
10475 /** VM-exits to check (one bit per VM-exit). */
10476 uint32_t bmExitsToCheck[3];
10477
10478 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10479 uint32_t fProcCtlsInitial;
10480 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10481 uint32_t fProcCtls2Initial;
10482 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10483 uint32_t bmXcptInitial;
10484} VMXRUNDBGSTATE;
10485AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10486typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
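/* Added note (illustrative): bmExitsToCheck provides 3 * 32 = 96 bits, one bit per VM-exit
   reason; the AssertCompile above verifies that (VMX_EXIT_MAX + 1) bits, rounded up to whole
   32-bit words, occupy exactly that many bytes. */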
10487
10488
10489/**
10490 * Initializes the VMXRUNDBGSTATE structure.
10491 *
10492 * @param pVCpu The cross context virtual CPU structure of the
10493 * calling EMT.
10494 * @param pVmxTransient The VMX-transient structure.
10495 * @param pDbgState The debug state to initialize.
10496 */
10497static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10498{
10499 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10500 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10501
10502 pDbgState->fModifiedProcCtls = false;
10503 pDbgState->fModifiedProcCtls2 = false;
10504 pDbgState->fModifiedXcptBitmap = false;
10505 pDbgState->fClearCr0Mask = false;
10506 pDbgState->fClearCr4Mask = false;
10507 pDbgState->fCpe1Extra = 0;
10508 pDbgState->fCpe1Unwanted = 0;
10509 pDbgState->fCpe2Extra = 0;
10510 pDbgState->bmXcptExtra = 0;
10511 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10512 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10513 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10514}
10515
10516
10517/**
10518  * Updates the VMCS fields with changes requested by @a pDbgState.
10519 *
10520  * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10521  * immediately before executing guest code, i.e. when interrupts are disabled.
10522 * We don't check status codes here as we cannot easily assert or return in the
10523 * latter case.
10524 *
10525 * @param pVCpu The cross context virtual CPU structure.
10526 * @param pVmxTransient The VMX-transient structure.
10527 * @param pDbgState The debug state.
10528 */
10529static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10530{
10531 /*
10532 * Ensure desired flags in VMCS control fields are set.
10533 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10534 *
10535 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10536 * there should be no stale data in pCtx at this point.
10537 */
10538 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10539 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10540 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10541 {
10542 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10543 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10544 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10545 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10546 pDbgState->fModifiedProcCtls = true;
10547 }
10548
10549 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10550 {
10551 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10552 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10553 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10554 pDbgState->fModifiedProcCtls2 = true;
10555 }
10556
10557 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10558 {
10559 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10560 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10561 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10562 pDbgState->fModifiedXcptBitmap = true;
10563 }
10564
10565 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10566 {
10567 pVmcsInfo->u64Cr0Mask = 0;
10568 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10569 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10570 }
10571
10572 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10573 {
10574 pVmcsInfo->u64Cr4Mask = 0;
10575 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10576 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10577 }
10578
10579 NOREF(pVCpu);
10580}
10581
10582
10583/**
10584  * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10585 * re-entry next time around.
10586 *
10587 * @returns Strict VBox status code (i.e. informational status codes too).
10588 * @param pVCpu The cross context virtual CPU structure.
10589 * @param pVmxTransient The VMX-transient structure.
10590 * @param pDbgState The debug state.
10591 * @param rcStrict The return code from executing the guest using single
10592 * stepping.
10593 */
10594static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10595 VBOXSTRICTRC rcStrict)
10596{
10597 /*
10598 * Restore VM-exit control settings as we may not reenter this function the
10599 * next time around.
10600 */
10601 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10602
10603     /* We reload the initial value and trigger what recalculations we can the
10604        next time around.  From the looks of things, that's all that's required atm. */
10605 if (pDbgState->fModifiedProcCtls)
10606 {
10607 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10608 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10609 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10610 AssertRC(rc2);
10611 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10612 }
10613
10614 /* We're currently the only ones messing with this one, so just restore the
10615 cached value and reload the field. */
10616 if ( pDbgState->fModifiedProcCtls2
10617 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10618 {
10619 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10620 AssertRC(rc2);
10621 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10622 }
10623
10624 /* If we've modified the exception bitmap, we restore it and trigger
10625 reloading and partial recalculation the next time around. */
10626 if (pDbgState->fModifiedXcptBitmap)
10627 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10628
10629 return rcStrict;
10630}
10631
10632
10633/**
10634 * Configures VM-exit controls for current DBGF and DTrace settings.
10635 *
10636 * This updates @a pDbgState and the VMCS execution control fields to reflect
10637 * the necessary VM-exits demanded by DBGF and DTrace.
10638 *
10639 * @param pVCpu The cross context virtual CPU structure.
10640 * @param pVmxTransient The VMX-transient structure. May update
10641 * fUpdatedTscOffsettingAndPreemptTimer.
10642 * @param pDbgState The debug state.
10643 */
10644static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10645{
10646#ifndef IN_NEM_DARWIN
10647 /*
10648 * Take down the dtrace serial number so we can spot changes.
10649 */
10650 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10651 ASMCompilerBarrier();
10652#endif
10653
10654 /*
10655 * We'll rebuild most of the middle block of data members (holding the
10656 * current settings) as we go along here, so start by clearing it all.
10657 */
10658 pDbgState->bmXcptExtra = 0;
10659 pDbgState->fCpe1Extra = 0;
10660 pDbgState->fCpe1Unwanted = 0;
10661 pDbgState->fCpe2Extra = 0;
10662 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10663 pDbgState->bmExitsToCheck[i] = 0;
10664
10665 /*
10666 * Software interrupts (INT XXh) - no idea how to trigger these...
10667 */
10668 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10669 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10670 || VBOXVMM_INT_SOFTWARE_ENABLED())
10671 {
10672 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10673 }
10674
10675 /*
10676 * INT3 breakpoints - triggered by #BP exceptions.
10677 */
10678 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10679 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10680
10681 /*
10682 * Exception bitmap and XCPT events+probes.
10683 */
10684 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10685 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10686 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10687
10688 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10689 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10690 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10691 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10692 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10693 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10694 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10695 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10696 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10697 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10698 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10699 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10700 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10701 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10702 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10703 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10704 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10705 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10706
10707 if (pDbgState->bmXcptExtra)
10708 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10709
10710 /*
10711 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10712 *
10713      * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10714      * So, when adding/changing/removing entries, please don't forget to update it as well.
10715 *
10716      * Some of the macros are picking up local variables to save horizontal space
10717      * (being able to see it in a table is the lesser evil here).
10718 */
10719#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10720 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10721 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10722#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10723 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10724 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10725 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10726 } else do { } while (0)
10727#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10728 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10729 { \
10730 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10731 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10732 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10733 } else do { } while (0)
10734#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10735 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10736 { \
10737 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10738 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10739 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10740 } else do { } while (0)
10741#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10742 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10743 { \
10744 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10745 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10746 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10747 } else do { } while (0)
10748
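    /* Illustrative expansion (added, not generated code): with the definitions above,
           SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT);
       roughly becomes
           if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
               || VBOXVMM_INSTR_HALT_ENABLED())
           {
               pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
               ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
           }
       i.e. it both forces the corresponding intercept on and flags the VM-exit for the
       DBGF/dtrace checks done by the debug run loop. */
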
10749 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10750 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10751 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10752 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10753 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10754
10755 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10756 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10757 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10758 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10759 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10760 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10761 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10762 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10763 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10764 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10765 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10766 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10767 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10768 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10769 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10770 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10771 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10772 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10773 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10774 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10775 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10776 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10777 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10778 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10779 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10780 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10781 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10782 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10783 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10784 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10785 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10786 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10787 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10788 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10789 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10790 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10791
10792 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10793 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10794 {
10795 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10796 | CPUMCTX_EXTRN_APIC_TPR);
10797 AssertRC(rc);
10798
10799#if 0 /** @todo fix me */
10800 pDbgState->fClearCr0Mask = true;
10801 pDbgState->fClearCr4Mask = true;
10802#endif
10803 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10804 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10805 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10806 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10807 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10808 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10809 require clearing here and in the loop if we start using it. */
10810 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10811 }
10812 else
10813 {
10814 if (pDbgState->fClearCr0Mask)
10815 {
10816 pDbgState->fClearCr0Mask = false;
10817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10818 }
10819 if (pDbgState->fClearCr4Mask)
10820 {
10821 pDbgState->fClearCr4Mask = false;
10822 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10823 }
10824 }
10825 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10826 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10827
10828 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10829 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10830 {
10831 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10832 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10833 }
10834 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10835 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10836
10837 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10838 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10839 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10840 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10841 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10842 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10843 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10844 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10845#if 0 /** @todo too slow, fix handler. */
10846 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10847#endif
10848 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10849
10850 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10851 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10852 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10853 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10854 {
10855 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10856 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10857 }
10858 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10859 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10860 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10861 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10862
10863 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10864 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10865 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10866 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10867 {
10868 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10869 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10870 }
10871 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10872 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10873 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10874 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10875
10876 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10877 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10878 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10879 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10880 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10881 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10882 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10883 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10884 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10885 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10886 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10887 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10888 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10889 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10890 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10891 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10892 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10893 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10894 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10895     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10896 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10897 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10898
10899#undef IS_EITHER_ENABLED
10900#undef SET_ONLY_XBM_IF_EITHER_EN
10901#undef SET_CPE1_XBM_IF_EITHER_EN
10902#undef SET_CPEU_XBM_IF_EITHER_EN
10903#undef SET_CPE2_XBM_IF_EITHER_EN
10904
10905 /*
10906 * Sanitize the control stuff.
10907 */
10908 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10909 if (pDbgState->fCpe2Extra)
10910 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10911 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10912 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10913#ifndef IN_NEM_DARWIN
10914 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10915 {
10916 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10917 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10918 }
10919#else
10920 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10921 {
10922 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10923 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10924 }
10925#endif
10926
10927 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10928 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10929 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10930 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10931}
10932
10933
10934/**
10935 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10936 * appropriate.
10937 *
10938  * The caller has checked the VM-exit against the
10939  * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
10940  * so we don't have to do either of those here.
10941 *
10942 * @returns Strict VBox status code (i.e. informational status codes too).
10943 * @param pVCpu The cross context virtual CPU structure.
10944 * @param pVmxTransient The VMX-transient structure.
10945 * @param uExitReason The VM-exit reason.
10946 *
10947 * @remarks The name of this function is displayed by dtrace, so keep it short
10948  *          and to the point. No longer than 33 chars, please.
10949 */
10950static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10951{
10952 /*
10953 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10954 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10955 *
10956      * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10957      * does. Must add/change/remove in both places. Same ordering, please.
10958 *
10959 * Added/removed events must also be reflected in the next section
10960 * where we dispatch dtrace events.
10961 */
10962 bool fDtrace1 = false;
10963 bool fDtrace2 = false;
10964 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10965 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10966 uint32_t uEventArg = 0;
10967#define SET_EXIT(a_EventSubName) \
10968 do { \
10969 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10970 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10971 } while (0)
10972#define SET_BOTH(a_EventSubName) \
10973 do { \
10974 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10975 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10976 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10977 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10978 } while (0)
10979 switch (uExitReason)
10980 {
10981 case VMX_EXIT_MTF:
10982 return vmxHCExitMtf(pVCpu, pVmxTransient);
10983
10984 case VMX_EXIT_XCPT_OR_NMI:
10985 {
10986 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10987 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10988 {
10989 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10990 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10991 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10992 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10993 {
10994 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10995 {
10996 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10997 uEventArg = pVmxTransient->uExitIntErrorCode;
10998 }
10999 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11000 switch (enmEvent1)
11001 {
11002 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11003 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11004 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11005 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11006 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11007 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11008 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11009 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11010 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11011 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11012 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11013 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11014 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11015 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11016 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11017 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11018 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11019 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11020 default: break;
11021 }
11022 }
11023 else
11024 AssertFailed();
11025 break;
11026
11027 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11028 uEventArg = idxVector;
11029 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11030 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11031 break;
11032 }
11033 break;
11034 }
11035
11036 case VMX_EXIT_TRIPLE_FAULT:
11037 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11038 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11039 break;
11040 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11041 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11042 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11043 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11044 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11045
11046 /* Instruction specific VM-exits: */
11047 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11048 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11049 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11050 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11051 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11052 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11053 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11054 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11055 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11056 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11057 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11058 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11059 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11060 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11061 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11062 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11063 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11064 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11065 case VMX_EXIT_MOV_CRX:
11066 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11067 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11068 SET_BOTH(CRX_READ);
11069 else
11070 SET_BOTH(CRX_WRITE);
11071 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11072 break;
11073 case VMX_EXIT_MOV_DRX:
11074 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11075 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11076 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11077 SET_BOTH(DRX_READ);
11078 else
11079 SET_BOTH(DRX_WRITE);
11080 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11081 break;
11082 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11083 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11084 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11085 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11086 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11087 case VMX_EXIT_GDTR_IDTR_ACCESS:
11088 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11089 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11090 {
11091 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11092 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11093 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11094 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11095 }
11096 break;
11097
11098 case VMX_EXIT_LDTR_TR_ACCESS:
11099 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11100 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11101 {
11102 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11103 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11104 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11105 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11106 }
11107 break;
11108
11109 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11110 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11111 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11112 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11113 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11114 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11115 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11116 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11117 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11118 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11119 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11120
11121 /* Events that aren't relevant at this point. */
11122 case VMX_EXIT_EXT_INT:
11123 case VMX_EXIT_INT_WINDOW:
11124 case VMX_EXIT_NMI_WINDOW:
11125 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11126 case VMX_EXIT_PREEMPT_TIMER:
11127 case VMX_EXIT_IO_INSTR:
11128 break;
11129
11130 /* Errors and unexpected events. */
11131 case VMX_EXIT_INIT_SIGNAL:
11132 case VMX_EXIT_SIPI:
11133 case VMX_EXIT_IO_SMI:
11134 case VMX_EXIT_SMI:
11135 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11136 case VMX_EXIT_ERR_MSR_LOAD:
11137 case VMX_EXIT_ERR_MACHINE_CHECK:
11138 case VMX_EXIT_PML_FULL:
11139 case VMX_EXIT_VIRTUALIZED_EOI:
11140 break;
11141
11142 default:
11143 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11144 break;
11145 }
11146#undef SET_BOTH
11147#undef SET_EXIT
11148
11149 /*
11150      * Dtrace tracepoints go first.  We do them all here at once so we don't
11151      * have to repeat the guest-state saving and related work a few dozen times.
11152      * The downside is that we've got to repeat the switch, though this time
11153      * we use enmEvent since the probes are a subset of what DBGF does.
11154 */
11155 if (fDtrace1 || fDtrace2)
11156 {
11157 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11158 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11159 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11160 switch (enmEvent1)
11161 {
11162 /** @todo consider which extra parameters would be helpful for each probe. */
11163 case DBGFEVENT_END: break;
11164 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11165 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11166 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11167 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11168 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11169 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11170 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11171 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11172 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11173 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11174 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11175 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11176 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11177 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11178 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11179 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11180 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11181 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11182 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11183 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11184 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11185 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11186 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11187 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11188 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11189 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11190 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11191 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11192 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11193 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11194 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11195 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11196 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11197 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11198 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11199 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11200 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11201 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11202 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11203 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11204 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11205 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11206 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11207 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11208 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11209 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11210 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11211 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11212 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11213 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11214 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11215 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11216 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11217 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11218 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11219 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11220 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11221 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11222 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11223 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11224 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11225 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11226 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11227 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11228 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11229 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11230 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11231 }
11232 switch (enmEvent2)
11233 {
11234 /** @todo consider which extra parameters would be helpful for each probe. */
11235 case DBGFEVENT_END: break;
11236 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11237 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11238 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11239 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11240 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11241 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11242 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11243 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11244 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11245 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11246 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11247 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11248 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11249 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11250 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11251 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11252 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11253 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11254 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11255 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11256 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11257 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11258 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11259 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11260 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11261 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11262 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11263 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11264 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11265 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11266 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11267 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11268 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11269 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11270 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11271 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11272 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11273 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11274 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11275 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11276 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11277 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11278 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11279 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11280 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11281 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11282 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11283 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11284 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11285 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11286 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11287 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11288 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11289 }
11290 }
11291
11292 /*
11293 * Fire off the DBGF event, if enabled (our check here is just a quick one;
11294 * the DBGF call will do a full check).
11295 *
11296 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11297 * Note! If we have two events, we prioritize the first, i.e. the instruction
11298 * one, in order to avoid event nesting.
11299 */
11300 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11301 if ( enmEvent1 != DBGFEVENT_END
11302 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11303 {
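      /* Only CS and RIP are imported here - presumably just enough for the event to be
         reported with the current guest code location, keeping this path cheap. */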
11304 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11305 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11306 if (rcStrict != VINF_SUCCESS)
11307 return rcStrict;
11308 }
11309 else if ( enmEvent2 != DBGFEVENT_END
11310 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11311 {
11312 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11313 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11314 if (rcStrict != VINF_SUCCESS)
11315 return rcStrict;
11316 }
11317
11318 return VINF_SUCCESS;
11319}
11320
11321
11322/**
11323 * Single-stepping VM-exit filtering.
11324 *
11325 * This preprocesses the VM-exits and decides whether we have stepped far
11326 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11327 * handling is performed.
11328 *
11329 * @returns Strict VBox status code (i.e. informational status codes too).
11330 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11331 * @param pVmxTransient The VMX-transient structure.
11332 * @param pDbgState The debug state.
11333 */
11334DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11335{
11336 /*
11337 * Expensive (saves context) generic dtrace VM-exit probe.
11338 */
11339 uint32_t const uExitReason = pVmxTransient->uExitReason;
11340 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11341 { /* more likely */ }
11342 else
11343 {
11344 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11345 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11346 AssertRC(rc);
11347 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11348 }
11349
11350#ifndef IN_NEM_DARWIN
11351 /*
11352 * Check for host NMI, just to get that out of the way.
11353 */
11354 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11355 { /* normally likely */ }
11356 else
11357 {
11358 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11359 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11360 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11361 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11362 }
11363#endif
11364
11365 /*
11366 * Check for single stepping event if we're stepping.
11367 */
11368 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11369 {
11370 switch (uExitReason)
11371 {
11372 case VMX_EXIT_MTF:
11373 return vmxHCExitMtf(pVCpu, pVmxTransient);
11374
11375 /* Various events: */
11376 case VMX_EXIT_XCPT_OR_NMI:
11377 case VMX_EXIT_EXT_INT:
11378 case VMX_EXIT_TRIPLE_FAULT:
11379 case VMX_EXIT_INT_WINDOW:
11380 case VMX_EXIT_NMI_WINDOW:
11381 case VMX_EXIT_TASK_SWITCH:
11382 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11383 case VMX_EXIT_APIC_ACCESS:
11384 case VMX_EXIT_EPT_VIOLATION:
11385 case VMX_EXIT_EPT_MISCONFIG:
11386 case VMX_EXIT_PREEMPT_TIMER:
11387
11388 /* Instruction specific VM-exits: */
11389 case VMX_EXIT_CPUID:
11390 case VMX_EXIT_GETSEC:
11391 case VMX_EXIT_HLT:
11392 case VMX_EXIT_INVD:
11393 case VMX_EXIT_INVLPG:
11394 case VMX_EXIT_RDPMC:
11395 case VMX_EXIT_RDTSC:
11396 case VMX_EXIT_RSM:
11397 case VMX_EXIT_VMCALL:
11398 case VMX_EXIT_VMCLEAR:
11399 case VMX_EXIT_VMLAUNCH:
11400 case VMX_EXIT_VMPTRLD:
11401 case VMX_EXIT_VMPTRST:
11402 case VMX_EXIT_VMREAD:
11403 case VMX_EXIT_VMRESUME:
11404 case VMX_EXIT_VMWRITE:
11405 case VMX_EXIT_VMXOFF:
11406 case VMX_EXIT_VMXON:
11407 case VMX_EXIT_MOV_CRX:
11408 case VMX_EXIT_MOV_DRX:
11409 case VMX_EXIT_IO_INSTR:
11410 case VMX_EXIT_RDMSR:
11411 case VMX_EXIT_WRMSR:
11412 case VMX_EXIT_MWAIT:
11413 case VMX_EXIT_MONITOR:
11414 case VMX_EXIT_PAUSE:
11415 case VMX_EXIT_GDTR_IDTR_ACCESS:
11416 case VMX_EXIT_LDTR_TR_ACCESS:
11417 case VMX_EXIT_INVEPT:
11418 case VMX_EXIT_RDTSCP:
11419 case VMX_EXIT_INVVPID:
11420 case VMX_EXIT_WBINVD:
11421 case VMX_EXIT_XSETBV:
11422 case VMX_EXIT_RDRAND:
11423 case VMX_EXIT_INVPCID:
11424 case VMX_EXIT_VMFUNC:
11425 case VMX_EXIT_RDSEED:
11426 case VMX_EXIT_XSAVES:
11427 case VMX_EXIT_XRSTORS:
11428 {
11429 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11430 AssertRCReturn(rc, rc);
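            /* Stepping is considered done once CS:RIP has moved away from where the single
               step started; until then we fall through to the normal handling below. */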
11431 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11432 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11433 return VINF_EM_DBG_STEPPED;
11434 break;
11435 }
11436
11437 /* Errors and unexpected events: */
11438 case VMX_EXIT_INIT_SIGNAL:
11439 case VMX_EXIT_SIPI:
11440 case VMX_EXIT_IO_SMI:
11441 case VMX_EXIT_SMI:
11442 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11443 case VMX_EXIT_ERR_MSR_LOAD:
11444 case VMX_EXIT_ERR_MACHINE_CHECK:
11445 case VMX_EXIT_PML_FULL:
11446 case VMX_EXIT_VIRTUALIZED_EOI:
11447 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11448 break;
11449
11450 default:
11451 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11452 break;
11453 }
11454 }
11455
11456 /*
11457 * Check for debugger event breakpoints and dtrace probes.
11458 */
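   /* bmExitsToCheck is a bitmap with one bit per VM-exit reason (32 bits per array
      element); a set bit means a DBGF event or dtrace probe is armed for that exit. */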
11459 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11460 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11461 {
11462 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11463 if (rcStrict != VINF_SUCCESS)
11464 return rcStrict;
11465 }
11466
11467 /*
11468 * Normal processing.
11469 */
11470#ifdef HMVMX_USE_FUNCTION_TABLE
11471 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11472#else
11473 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11474#endif
11475}
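
/*
 * Illustrative sketch (assumptions, not code from this file): how a debug run loop
 * might use vmxHCRunDebugHandleExit. The helper names vmxHCPreRunGuestDebug and
 * vmxHCRunGuestDebug below are placeholders for whatever the actual debug loop calls.
 *
 *     for (;;)
 *     {
 *         rcStrict = vmxHCPreRunGuestDebug(pVCpu, &VmxTransient, &DbgState); // placeholder: apply debug VMCS controls
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *         vmxHCRunGuestDebug(pVCpu, &VmxTransient);                          // placeholder: enter the guest
 *
 *         rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
 *         if (rcStrict != VINF_SUCCESS)                                      // e.g. VINF_EM_DBG_STEPPED ends single-stepping
 *             break;
 *     }
 */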
11476
11477/** @} */