VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97069

Last change on this file since 97069 was 97069, checked in by vboxsync, 3 years ago

VMM/HMVMXR0: Working on streamlining CPU state importing from the VMCS. This does cause quite some code bloat (release linux from 93950 to 132120 text bytes), but it is hopefully worth it. This should also provide some basis for addressing the @todo in nemR3DarwinHandleExitCommon (NEM/darwin) where the code imports the entire state for every exit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 519.3 KB
1/* $Id: VMXAllTemplate.cpp.h 97069 2022-10-10 15:03:10Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP), which are always
69 * swapped and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
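/*
 * Illustrative sketch (not part of the surrounding code): importing the full set of
 * VMX-managed guest state using the mask above, via the importer declared further down
 * in this file. Assumes pVCpu and pVmcsInfo are in scope; real callers typically import
 * only the subset of state a given VM-exit needs.
 *
 *     int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
 *     AssertRC(rc);
 */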
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB, which are always intercepted anyway (to prevent the CPU from
95 * deadlocking due to bugs in Intel CPUs) and hence are not part of this mask.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
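/*
 * Illustrative sketch: asserting that CR0 and RFLAGS have been imported (i.e. are no
 * longer marked external in fExtrn) before the guest context is dereferenced. Real
 * callers pass whatever CPUMCTX_EXTRN_XXX bits the following code actually relies on.
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 *     uint64_t const uGstCr0 = pVCpu->cpum.GstCtx.cr0;
 */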
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
470 /* 32-bit read-only fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
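/*
 * Illustrative sketch: walking the table above. The real shadow-VMCS setup code
 * (elsewhere in the VMX initialization path) decides per field whether it is eligible
 * for the VMREAD/VMWRITE bitmaps; this only shows the iteration pattern.
 *
 *     for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFields); i++)
 *     {
 *         uint32_t const uVmcsField = g_aVmcsFields[i];
 *         // e.g. record uVmcsField in the shadow VMCS read/write field arrays here.
 *     }
 */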
569
570#ifdef HMVMX_USE_FUNCTION_TABLE
571/**
572 * VMX_EXIT dispatch table.
573 */
574static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
575{
576 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
577 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
578 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
579 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
580 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
581 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
582 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
583 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
584 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
585 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
586 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
587 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
588 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
589 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
590 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
591 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
592 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
593 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
594 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
596 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
597 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
598 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
599 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
600 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
601 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
602 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
603 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
604 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
605#else
606 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
607 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
608 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
609 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
610 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
611 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
612 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
613 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
614 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
615#endif
616 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
617 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
618 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
619 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
620 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
621 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
622 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
623 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
624 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
625 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
626 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
627 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
628 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
629 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
630 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
632 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
633 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
634 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
635 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
636 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
637 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
639 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
640#else
641 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
642#endif
643 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
644 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
646 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
647#else
648 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
651 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
652 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
653 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
654 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
655 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
656 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
657 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
658 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
659 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
660 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
661 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
662 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
663 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
664 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
665 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
666};
667#endif /* HMVMX_USE_FUNCTION_TABLE */
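/*
 * Illustrative sketch: how a dispatcher can consult the table above once the basic
 * VM-exit reason is known. Assumes pVmxTransient->uExitReason already holds the exit
 * reason; the real dispatch code in this file adds profiling and nested-guest handling.
 *
 *     uint32_t const uExitReason = pVmxTransient->uExitReason;
 *     VBOXSTRICTRC rcStrict;
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     else
 *         rcStrict = vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 */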
668
669#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
670static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
671{
672 /* 0 */ "(Not Used)",
673 /* 1 */ "VMCALL executed in VMX root operation.",
674 /* 2 */ "VMCLEAR with invalid physical address.",
675 /* 3 */ "VMCLEAR with VMXON pointer.",
676 /* 4 */ "VMLAUNCH with non-clear VMCS.",
677 /* 5 */ "VMRESUME with non-launched VMCS.",
678 /* 6 */ "VMRESUME after VMXOFF",
679 /* 7 */ "VM-entry with invalid control fields.",
680 /* 8 */ "VM-entry with invalid host state fields.",
681 /* 9 */ "VMPTRLD with invalid physical address.",
682 /* 10 */ "VMPTRLD with VMXON pointer.",
683 /* 11 */ "VMPTRLD with incorrect revision identifier.",
684 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
685 /* 13 */ "VMWRITE to read-only VMCS component.",
686 /* 14 */ "(Not Used)",
687 /* 15 */ "VMXON executed in VMX root operation.",
688 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
689 /* 17 */ "VM-entry with non-launched executing VMCS.",
690 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
691 /* 19 */ "VMCALL with non-clear VMCS.",
692 /* 20 */ "VMCALL with invalid VM-exit control fields.",
693 /* 21 */ "(Not Used)",
694 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
695 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
696 /* 24 */ "VMCALL with invalid SMM-monitor features.",
697 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
698 /* 26 */ "VM-entry with events blocked by MOV SS.",
699 /* 27 */ "(Not Used)",
700 /* 28 */ "Invalid operand to INVEPT/INVVPID."
701};
702#endif /* VBOX_STRICT && LOG_ENABLED */
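/*
 * Illustrative sketch: translating a failed VMX instruction into a readable message
 * using the table above (only available in strict, logging-enabled builds). Assumes
 * the VMCS that recorded the error is still the current one.
 *
 *     uint32_t uInstrError = 0;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
 *         Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 */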
703
704
705/**
706 * Gets the CR0 guest/host mask.
707 *
708 * These bits typically do not change through the lifetime of a VM. Any bit set in
709 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
710 * by the guest.
711 *
712 * @returns The CR0 guest/host mask.
713 * @param pVCpu The cross context virtual CPU structure.
714 */
715static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
716{
717 /*
718 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
719 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
720 *
721 * Furthermore, modifications to any bits that are reserved/unspecified currently
722 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
723 * when future CPUs specify and use currently reserved/unspecified bits.
724 */
725 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
726 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
727 * and @bugref{6944}. */
728 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
729 return ( X86_CR0_PE
730 | X86_CR0_NE
731 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
732 | X86_CR0_PG
733 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
734}
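/*
 * Illustrative sketch: committing the mask to the VMCS. The CR0 export code typically
 * does this (together with the CR0 read shadow); VMX_VMCS_WRITE_NW is assumed to be
 * the natural-width write wrapper provided by the file including this template.
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */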
735
736
737/**
738 * Gets the CR4 guest/host mask.
739 *
740 * These bits typically do not change through the lifetime of a VM. Any bit set in
741 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
742 * by the guest.
743 *
744 * @returns The CR4 guest/host mask.
745 * @param pVCpu The cross context virtual CPU structure.
746 */
747static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
748{
749 /*
750 * We construct a mask of all CR4 bits that the guest can modify without causing
751 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
752 * a VM-exit when the guest attempts to modify them when executing using
753 * hardware-assisted VMX.
754 *
755 * When a feature is not exposed to the guest (and may be present on the host),
756 * we want to intercept guest modifications to the bit so we can emulate proper
757 * behavior (e.g., #GP).
758 *
759 * Furthermore, only modifications to those bits that don't require immediate
760 * emulation are allowed. E.g., PCIDE is excluded because the behavior
761 * depends on CR3 which might not always be the guest value while executing
762 * using hardware-assisted VMX.
763 */
764 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
765 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
766#ifdef IN_NEM_DARWIN
767 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
768#endif
769 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
770
771 /*
772 * Paranoia.
773 * Ensure features exposed to the guest are present on the host.
774 */
775 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
776#ifdef IN_NEM_DARWIN
777 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
778#endif
779 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
780
781 uint64_t const fGstMask = X86_CR4_PVI
782 | X86_CR4_TSD
783 | X86_CR4_DE
784 | X86_CR4_MCE
785 | X86_CR4_PCE
786 | X86_CR4_OSXMMEEXCPT
787 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
788#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
789 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
790 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
791#endif
792 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
793 return ~fGstMask;
794}
795
796
797/**
798 * Adds one or more exceptions to the exception bitmap and commits it to the current
799 * VMCS.
800 *
801 * @param pVCpu The cross context virtual CPU structure.
802 * @param pVmxTransient The VMX-transient structure.
803 * @param uXcptMask The exception(s) to add.
804 */
805static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
806{
807 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
808 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
809 if ((uXcptBitmap & uXcptMask) != uXcptMask)
810 {
811 uXcptBitmap |= uXcptMask;
812 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
813 AssertRC(rc);
814 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
815 }
816}
817
818
819/**
820 * Adds an exception to the exception bitmap and commits it to the current VMCS.
821 *
822 * @param pVCpu The cross context virtual CPU structure.
823 * @param pVmxTransient The VMX-transient structure.
824 * @param uXcpt The exception to add.
825 */
826static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
827{
828 Assert(uXcpt <= X86_XCPT_LAST);
829 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
830}
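/*
 * Illustrative sketch: making sure \#GP is intercepted for the current VMCS. The
 * helper is idempotent, so calling it when the bit is already set only costs the
 * bitmap check.
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */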
831
832
833/**
834 * Removes one or more exceptions from the exception bitmap and commits it to the
835 * current VMCS.
836 *
837 * This takes care of not removing the exception intercept if a nested-guest
838 * requires the exception to be intercepted.
839 *
840 * @returns VBox status code.
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to remove.
844 */
845static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if (u32XcptBitmap & uXcptMask)
850 {
851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
852 if (!pVmxTransient->fIsNestedGuest)
853 { /* likely */ }
854 else
855 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
856#endif
857#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
858 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
859 | RT_BIT(X86_XCPT_DE)
860 | RT_BIT(X86_XCPT_NM)
861 | RT_BIT(X86_XCPT_TS)
862 | RT_BIT(X86_XCPT_UD)
863 | RT_BIT(X86_XCPT_NP)
864 | RT_BIT(X86_XCPT_SS)
865 | RT_BIT(X86_XCPT_GP)
866 | RT_BIT(X86_XCPT_PF)
867 | RT_BIT(X86_XCPT_MF));
868#elif defined(HMVMX_ALWAYS_TRAP_PF)
869 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
870#endif
871 if (uXcptMask)
872 {
873 /* Validate we are not removing any essential exception intercepts. */
874#ifndef IN_NEM_DARWIN
875 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
876#else
877 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
878#endif
879 NOREF(pVCpu);
880 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
881 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
882
883 /* Remove it from the exception bitmap. */
884 u32XcptBitmap &= ~uXcptMask;
885
886 /* Commit and update the cache if necessary. */
887 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
888 {
889 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
890 AssertRC(rc);
891 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
892 }
893 }
894 }
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Removes an exception from the exception bitmap and commits it to the current
901 * VMCS.
902 *
903 * @returns VBox status code.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param pVmxTransient The VMX-transient structure.
906 * @param uXcpt The exception to remove.
907 */
908static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
909{
910 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
911}
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
914
915/**
916 * Loads the shadow VMCS specified by the VMCS info. object.
917 *
918 * @returns VBox status code.
919 * @param pVmcsInfo The VMCS info. object.
920 *
921 * @remarks Can be called with interrupts disabled.
922 */
923static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
924{
925 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
926 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
927
928 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
929 if (RT_SUCCESS(rc))
930 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
931 return rc;
932}
933
934
935/**
936 * Clears the shadow VMCS specified by the VMCS info. object.
937 *
938 * @returns VBox status code.
939 * @param pVmcsInfo The VMCS info. object.
940 *
941 * @remarks Can be called with interrupts disabled.
942 */
943static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
944{
945 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
946 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
947
948 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
949 if (RT_SUCCESS(rc))
950 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
951 return rc;
952}
953
954
955/**
956 * Switches from and to the specified VMCSes.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
960 * @param pVmcsInfoTo The VMCS info. object we are switching to.
961 *
962 * @remarks Called with interrupts disabled.
963 */
964static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
965{
966 /*
967 * Clear the VMCS we are switching out if it has not already been cleared.
968 * This will sync any CPU internal data back to the VMCS.
969 */
970 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
971 {
972 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
973 if (RT_SUCCESS(rc))
974 {
975 /*
976 * The shadow VMCS, if any, would not be active at this point since we
977 * would have cleared it while importing the virtual hardware-virtualization
978 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
979 * clear the shadow VMCS here, just assert for safety.
980 */
981 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
982 }
983 else
984 return rc;
985 }
986
987 /*
988 * Clear the VMCS we are switching to if it has not already been cleared.
989 * This will initialize the VMCS launch state to "clear" required for loading it.
990 *
991 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
992 */
993 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
994 {
995 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
996 if (RT_SUCCESS(rc))
997 { /* likely */ }
998 else
999 return rc;
1000 }
1001
1002 /*
1003 * Finally, load the VMCS we are switching to.
1004 */
1005 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1006}
1007
1008
1009/**
1010 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1011 * caller.
1012 *
1013 * @returns VBox status code.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1016 * true) or guest VMCS (pass false).
1017 */
1018static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1019{
1020 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1021 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1022
1023 PVMXVMCSINFO pVmcsInfoFrom;
1024 PVMXVMCSINFO pVmcsInfoTo;
1025 if (fSwitchToNstGstVmcs)
1026 {
1027 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1028 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1029 }
1030 else
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1034 }
1035
1036 /*
1037 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1038 * preemption hook code path acquires the current VMCS.
1039 */
1040 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1041
1042 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1043 if (RT_SUCCESS(rc))
1044 {
1045 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1046 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1047
1048 /*
1049 * If we are switching to a VMCS that was executed on a different host CPU or was
1050 * never executed before, flag that we need to export the host state before executing
1051 * guest/nested-guest code using hardware-assisted VMX.
1052 *
1053 * This could probably be done in a preemptible context since the preemption hook
1054 * will flag the necessary change in host context. However, since preemption is
1055 * already disabled and to avoid making assumptions about host specific code in
1056 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1057 * disabled.
1058 */
1059 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1060 { /* likely */ }
1061 else
1062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1063
1064 ASMSetFlags(fEFlags);
1065
1066 /*
1067 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1068 * flag that we need to update the host MSR values there. Even if we decide in the
1069 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1070 * if its content differs, we would have to update the host MSRs anyway.
1071 */
1072 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1073 }
1074 else
1075 ASMSetFlags(fEFlags);
1076 return rc;
1077}
1078
1079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1080#ifdef VBOX_STRICT
1081
1082/**
1083 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1084 * transient structure.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param pVmxTransient The VMX-transient structure.
1088 */
1089DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1090{
1091 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1092 AssertRC(rc);
1093}
1094
1095
1096/**
1097 * Reads the VM-entry exception error code field from the VMCS into
1098 * the VMX transient structure.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param pVmxTransient The VMX-transient structure.
1102 */
1103DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1104{
1105 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1106 AssertRC(rc);
1107}
1108
1109
1110/**
1111 * Reads the VM-entry instruction-length field from the VMCS into
1112 * the VMX transient structure.
1113 *
1114 * @param pVCpu The cross context virtual CPU structure.
1115 * @param pVmxTransient The VMX-transient structure.
1116 */
1117DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1118{
1119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1120 AssertRC(rc);
1121}
1122
1123#endif /* VBOX_STRICT */
1124
1125
1126/**
1127 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1128 *
1129 * Don't call directly unless it's likely that some or all of the fields
1130 * given in @a a_fReadMask have already been read.
1131 *
1132 * @tparam a_fReadMask The fields to read.
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136template<uint32_t const a_fReadMask>
1137static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1138{
1139 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1140 | HMVMX_READ_EXIT_INSTR_LEN
1141 | HMVMX_READ_EXIT_INSTR_INFO
1142 | HMVMX_READ_IDT_VECTORING_INFO
1143 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1144 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1145 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1146 | HMVMX_READ_GUEST_LINEAR_ADDR
1147 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1148 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1149 )) == 0);
1150
1151 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1152 {
1153 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1154
1155 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1156 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1157 {
1158 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1159 AssertRC(rc);
1160 }
1161 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1162 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1163 {
1164 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1165 AssertRC(rc);
1166 }
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1169 {
1170 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1174 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1180 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1198 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1199 {
1200 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1204 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1205 {
1206 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1213 AssertRC(rc);
1214 }
1215
1216 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1217 }
1218}
1219
1220
1221/**
1222 * Reads VMCS fields into the VMXTRANSIENT structure.
1223 *
1224 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1225 * generating an optimized read sequence w/o any conditionals in between in
1226 * non-strict builds.
1227 *
1228 * @tparam a_fReadMask The fields to read. One or more of the
1229 * HMVMX_READ_XXX fields ORed together.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 * @param pVmxTransient The VMX-transient structure.
1232 */
1233template<uint32_t const a_fReadMask>
1234DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1235{
1236 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1237 | HMVMX_READ_EXIT_INSTR_LEN
1238 | HMVMX_READ_EXIT_INSTR_INFO
1239 | HMVMX_READ_IDT_VECTORING_INFO
1240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1241 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1242 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1243 | HMVMX_READ_GUEST_LINEAR_ADDR
1244 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1245 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1246 )) == 0);
1247
1248 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1249 {
1250 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1251 {
1252 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1253 AssertRC(rc);
1254 }
1255 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1256 {
1257 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1286 {
1287 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1296 {
1297 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1298 AssertRC(rc);
1299 }
1300
1301 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1302 }
1303 else
1304 {
1305 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1306 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1307 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1308 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1309 }
1310}
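/*
 * Illustrative sketch: a VM-exit handler reading just the fields it needs up front
 * and then (in strict builds) asserting on them via HMVMX_ASSERT_READ before use.
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *     uint64_t const uExitQual = pVmxTransient->uExitQual;
 */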
1311
1312
1313#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1314/**
1315 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure.
1318 * @param pVmxTransient The VMX-transient structure.
1319 */
1320static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1321{
1322 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1323 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1324 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1325 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1326 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1329 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1330 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1333 | HMVMX_READ_EXIT_INSTR_LEN
1334 | HMVMX_READ_EXIT_INSTR_INFO
1335 | HMVMX_READ_IDT_VECTORING_INFO
1336 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1337 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1338 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1339 | HMVMX_READ_GUEST_LINEAR_ADDR
1340 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1341}
1342#endif
1343
1344/**
1345 * Verifies that our cached values of the VMCS fields are all consistent with
1346 * what's actually present in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1350 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1351 * VMCS content. HMCPU error-field is
1352 * updated, see VMX_VCI_XXX.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param pVmcsInfo The VMCS info. object.
1355 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1356 */
1357static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1358{
1359 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1360
1361 uint32_t u32Val;
1362 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1363 AssertRC(rc);
1364 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1365 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1366 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1367 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1368
1369 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1372 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1379 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1386 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1391 {
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1395 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398 }
1399
1400 uint64_t u64Val;
1401 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1402 {
1403 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1406 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409 }
1410
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1414 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417
1418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1421 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 NOREF(pcszVmcs);
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1432 * VMCS.
1433 *
1434 * This is typically required when the guest changes paging mode.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 * @param pVmxTransient The VMX-transient structure.
1439 *
1440 * @remarks Requires EFER.
1441 * @remarks No-long-jump zone!!!
1442 */
1443static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1444{
1445 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1446 {
1447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1449
1450 /*
1451 * VM-entry controls.
1452 */
1453 {
1454 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1455 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
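            /* Note on the allowed0/allowed1 convention (illustrative values, not from any
               particular CPU): if allowed0 were 0x000011ff and allowed1 0x00093fff, then every
               allowed0 bit must remain set in fVal and fVal may only contain bits also set in
               allowed1; starting fVal from allowed0 guarantees the former and the
               "(fVal & fZap) == fVal" test below enforces the latter. */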
1456
1457 /*
1458 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1459 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1460 *
1461 * For nested-guests, this is a mandatory VM-entry control. It's also
1462 * required because we do not want to leak host bits to the nested-guest.
1463 */
1464 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1465
1466 /*
1467 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1468 *
1469              * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1470 * required to get the nested-guest working with hardware-assisted VMX execution.
1471 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1472 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1473 * here rather than while merging the guest VMCS controls.
1474 */
1475 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1476 {
1477 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1478 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1479 }
1480 else
1481 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1482
1483 /*
1484 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1485 *
1486 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1487              * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
1488 * load whatever MSRs we require and we do not need to modify the guest visible copy
1489 * of the VM-entry MSR load area.
1490 */
1491 if ( g_fHmVmxSupportsVmcsEfer
1492#ifndef IN_NEM_DARWIN
1493 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1494#endif
1495 )
1496 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1497 else
1498 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1499
1500 /*
1501 * The following should -not- be set (since we're not in SMM mode):
1502 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1503 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1504 */
1505
1506 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1507 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1508
1509 if ((fVal & fZap) == fVal)
1510 { /* likely */ }
1511 else
1512 {
1513 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1514 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1515 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1516 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1517 }
1518
1519 /* Commit it to the VMCS. */
1520 if (pVmcsInfo->u32EntryCtls != fVal)
1521 {
1522 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1523 AssertRC(rc);
1524 pVmcsInfo->u32EntryCtls = fVal;
1525 }
1526 }
1527
1528 /*
1529 * VM-exit controls.
1530 */
1531 {
1532 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1533 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1534
1535 /*
1536 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1537 * supported the 1-setting of this bit.
1538 *
1539 * For nested-guests, we set the "save debug controls" as the converse
1540 * "load debug controls" is mandatory for nested-guests anyway.
1541 */
1542 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1543
1544 /*
1545 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1546 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1547 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1548 * vmxHCExportHostMsrs().
1549 *
1550 * For nested-guests, we always set this bit as we do not support 32-bit
1551 * hosts.
1552 */
1553 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1554
1555#ifndef IN_NEM_DARWIN
1556 /*
1557 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1558 *
1559 * For nested-guests, we should use the "save IA32_EFER" control if we also
1560 * used the "load IA32_EFER" control while exporting VM-entry controls.
1561 */
1562 if ( g_fHmVmxSupportsVmcsEfer
1563 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1564 {
1565 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1566 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1567 }
1568#endif
1569
1570 /*
1571 * Enable saving of the VMX-preemption timer value on VM-exit.
1572 * For nested-guests, currently not exposed/used.
1573 */
1574 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1575 * the timer value. */
1576 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1577 {
1578 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1579 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1580 }
1581
1582 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1583 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1584
1585 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1586 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1587 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1588
1589 if ((fVal & fZap) == fVal)
1590 { /* likely */ }
1591 else
1592 {
1593 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1594 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1595 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1596 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1597 }
1598
1599 /* Commit it to the VMCS. */
1600 if (pVmcsInfo->u32ExitCtls != fVal)
1601 {
1602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1603 AssertRC(rc);
1604 pVmcsInfo->u32ExitCtls = fVal;
1605 }
1606 }
1607
1608 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1609 }
1610 return VINF_SUCCESS;
1611}
1612
1613
1614/**
1615 * Sets the TPR threshold in the VMCS.
1616 *
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param pVmcsInfo The VMCS info. object.
1619 * @param u32TprThreshold The TPR threshold (task-priority class only).
1620 */
1621DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1622{
1623 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1624 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1625 RT_NOREF(pVmcsInfo);
1626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1627 AssertRC(rc);
1628}
1629
1630
1631/**
1632 * Exports the guest APIC TPR state into the VMCS.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure.
1635 * @param pVmxTransient The VMX-transient structure.
1636 *
1637 * @remarks No-long-jump zone!!!
1638 */
1639static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1640{
1641 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1642 {
1643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1644
1645 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1646 if (!pVmxTransient->fIsNestedGuest)
1647 {
1648 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1649 && APICIsEnabled(pVCpu))
1650 {
1651 /*
1652 * Setup TPR shadowing.
1653 */
1654 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1655 {
1656 bool fPendingIntr = false;
1657 uint8_t u8Tpr = 0;
1658 uint8_t u8PendingIntr = 0;
1659 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1660 AssertRC(rc);
1661
1662 /*
1663 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1664 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1665 * priority of the pending interrupt so we can deliver the interrupt. If there
1666 * are no interrupts pending, set threshold to 0 to not cause any
1667 * TPR-below-threshold VM-exits.
1668 */
1669 uint32_t u32TprThreshold = 0;
1670 if (fPendingIntr)
1671 {
1672 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1673 (which is the Task-Priority Class). */
1674 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1675 const uint8_t u8TprPriority = u8Tpr >> 4;
1676 if (u8PendingPriority <= u8TprPriority)
1677 u32TprThreshold = u8PendingPriority;
1678 }
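                    /* Worked example (assumed values): a pending vector of 0x51 has priority
                       class 5 (bits 7:4) while a guest TPR of 0x60 has class 6, so the
                       interrupt is masked; we program a threshold of 5 and get a
                       TPR-below-threshold VM-exit once the guest lowers its TPR below 0x50. */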
1679
1680 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1681 }
1682 }
1683 }
1684 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1685 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1686 }
1687}
1688
1689
1690/**
1691 * Gets the guest interruptibility-state and updates related force-flags.
1692 *
1693 * @returns Guest's interruptibility-state.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 *
1696 * @remarks No-long-jump zone!!!
1697 */
1698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1699{
1700 /*
1701 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1702 */
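    /* Behavioural note (per the Intel SDM): after STI with IF previously clear, external
       interrupts stay blocked for exactly one more instruction. The inhibition is only
       reported while RIP still equals the address recorded when it was set; otherwise the
       force-flag is stale and is simply cleared below. */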
1703 uint32_t fIntrState = 0;
1704 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1705 {
1706 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1707 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1708
1709 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1710 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1711 {
1712 if (pCtx->eflags.Bits.u1IF)
1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1714 else
1715 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1716 }
1717 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1718 {
1719 /*
1720 * We can clear the inhibit force flag as even if we go back to the recompiler
1721 * without executing guest code in VT-x, the flag's condition to be cleared is
1722 * met and thus the cleared state is correct.
1723 */
1724 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1725 }
1726 }
1727
1728 /*
1729 * Check if we should inhibit NMI delivery.
1730 */
1731 if (CPUMIsGuestNmiBlocking(pVCpu))
1732 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1733
1734 /*
1735 * Validate.
1736 */
1737#ifdef VBOX_STRICT
1738     /* We don't support block-by-SMI yet. */
1739 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1740
1741 /* Block-by-STI must not be set when interrupts are disabled. */
1742 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1743 {
1744 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1745 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1746 }
1747#endif
1748
1749 return fIntrState;
1750}
1751
1752
1753/**
1754 * Exports the exception intercepts required for guest execution in the VMCS.
1755 *
1756 * @param pVCpu The cross context virtual CPU structure.
1757 * @param pVmxTransient The VMX-transient structure.
1758 *
1759 * @remarks No-long-jump zone!!!
1760 */
1761static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1762{
1763 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1764 {
1765 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1766 if ( !pVmxTransient->fIsNestedGuest
1767 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1768 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1769 else
1770 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1771
1772 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1773 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1774 }
1775}
1776
1777
1778/**
1779 * Exports the guest's RIP into the guest-state area in the VMCS.
1780 *
1781 * @param pVCpu The cross context virtual CPU structure.
1782 *
1783 * @remarks No-long-jump zone!!!
1784 */
1785static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1786{
1787 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1788 {
1789 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1790
1791 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1792 AssertRC(rc);
1793
1794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1795 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1796 }
1797}
1798
1799
1800/**
1801 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1802 *
1803 * @param pVCpu The cross context virtual CPU structure.
1804 * @param pVmxTransient The VMX-transient structure.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1809{
1810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1811 {
1812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1813
1814 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1815            Let us assert it as such and write just the lower 32 bits. */
1816 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1817 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1818 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1819 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1820
1821#ifndef IN_NEM_DARWIN
1822 /*
1823 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1824 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1825 * can run the real-mode guest code under Virtual 8086 mode.
1826 */
1827 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1828 if (pVmcsInfo->RealMode.fRealOnV86Active)
1829 {
1830 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1831 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1832 Assert(!pVmxTransient->fIsNestedGuest);
1833 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1834 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1835 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
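            /* Illustrative example: real-mode EFLAGS 0x00000202 is stashed unmodified above,
               while the value written to the VMCS becomes 0x00020202 (VM=1, IOPL=0) so that
               IOPL-sensitive instructions (CLI/STI/PUSHF/POPF/INT n/IRET) raise #GP and can
               be emulated on the guest's behalf. */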
1836 }
1837#else
1838 RT_NOREF(pVmxTransient);
1839#endif
1840
1841 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1842 AssertRC(rc);
1843
1844 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1845 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1846 }
1847}
1848
1849
1850#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1851/**
1852 * Copies the nested-guest VMCS to the shadow VMCS.
1853 *
1854 * @returns VBox status code.
1855 * @param pVCpu The cross context virtual CPU structure.
1856 * @param pVmcsInfo The VMCS info. object.
1857 *
1858 * @remarks No-long-jump zone!!!
1859 */
1860static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1861{
1862 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1863 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1864
1865 /*
1866 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1867 * current VMCS, as we may try saving guest lazy MSRs.
1868 *
1869      * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1870      * calling the VMCS import code (which currently performs the guest MSR reads on
1871      * 64-bit hosts and accesses the auto-load/store MSR area on 32-bit hosts) or the
1872      * rest of the VMX leave-session machinery.
1873 */
1874 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1875
1876 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1877 if (RT_SUCCESS(rc))
1878 {
1879 /*
1880 * Copy all guest read/write VMCS fields.
1881 *
1882 * We don't check for VMWRITE failures here for performance reasons and
1883 * because they are not expected to fail, barring irrecoverable conditions
1884 * like hardware errors.
1885 */
1886 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1887 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1888 {
1889 uint64_t u64Val;
1890 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1891 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1892 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1893 }
1894
1895 /*
1896 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1897 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1898 */
1899 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1900 {
1901 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1902 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1903 {
1904 uint64_t u64Val;
1905 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1906 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1907 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1908 }
1909 }
1910
1911 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1912 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1913 }
1914
1915 ASMSetFlags(fEFlags);
1916 return rc;
1917}
1918
1919
1920/**
1921 * Copies the shadow VMCS to the nested-guest VMCS.
1922 *
1923 * @returns VBox status code.
1924 * @param pVCpu The cross context virtual CPU structure.
1925 * @param pVmcsInfo The VMCS info. object.
1926 *
1927 * @remarks Called with interrupts disabled.
1928 */
1929static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1930{
1931 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1932 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1933 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1934
1935 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1936 if (RT_SUCCESS(rc))
1937 {
1938 /*
1939 * Copy guest read/write fields from the shadow VMCS.
1940 * Guest read-only fields cannot be modified, so no need to copy them.
1941 *
1942 * We don't check for VMREAD failures here for performance reasons and
1943 * because they are not expected to fail, barring irrecoverable conditions
1944 * like hardware errors.
1945 */
1946 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1947 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1948 {
1949 uint64_t u64Val;
1950 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1951 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1952 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1953 }
1954
1955 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1956 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1957 }
1958 return rc;
1959}
1960
1961
1962/**
1963 * Enables VMCS shadowing for the given VMCS info. object.
1964 *
1965 * @param pVCpu The cross context virtual CPU structure.
1966 * @param pVmcsInfo The VMCS info. object.
1967 *
1968 * @remarks No-long-jump zone!!!
1969 */
1970static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1971{
1972 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1973 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1974 {
1975 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1976 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1977 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1978 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1979 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1980 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1981 Log4Func(("Enabled\n"));
1982 }
1983}
1984
1985
1986/**
1987 * Disables VMCS shadowing for the given VMCS info. object.
1988 *
1989 * @param pVCpu The cross context virtual CPU structure.
1990 * @param pVmcsInfo The VMCS info. object.
1991 *
1992 * @remarks No-long-jump zone!!!
1993 */
1994static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1995{
1996 /*
1997 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1998 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1999 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2000 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2001 *
2002 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2003 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2004 */
2005 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2006 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2007 {
2008 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2009 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2010 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2011 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2012 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2013 Log4Func(("Disabled\n"));
2014 }
2015}
2016#endif
2017
2018
2019/**
2020 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2021 *
2022  * The guest FPU state is always pre-loaded, hence we don't need to bother with
2023  * sharing FPU-related CR0 bits between the guest and host.
2024 *
2025 * @returns VBox status code.
2026 * @param pVCpu The cross context virtual CPU structure.
2027 * @param pVmxTransient The VMX-transient structure.
2028 *
2029 * @remarks No-long-jump zone!!!
2030 */
2031static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2032{
2033 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2034 {
2035 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2036 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2037
2038 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2039 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2040 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2041 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2042 else
2043 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
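        /* Illustrative values (typical hardware, not guaranteed): CR0_FIXED0 is often
           0x80000021 (PG, NE, PE) and CR0_FIXED1 0xffffffff, i.e. only PG/NE/PE are forced
           to 1 and nothing is forced to 0; unrestricted guest execution lifts the PE/PG
           requirement above so real-mode and non-paged guests can run directly. */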
2044
2045 if (!pVmxTransient->fIsNestedGuest)
2046 {
2047 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2048 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2049 uint64_t const u64ShadowCr0 = u64GuestCr0;
2050 Assert(!RT_HI_U32(u64GuestCr0));
2051
2052 /*
2053 * Setup VT-x's view of the guest CR0.
2054 */
2055 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2056 if (VM_IS_VMX_NESTED_PAGING(pVM))
2057 {
2058#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2059 if (CPUMIsGuestPagingEnabled(pVCpu))
2060 {
2061 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2062 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2063 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2064 }
2065 else
2066 {
2067 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2068 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2069 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2070 }
2071
2072 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2073 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2074 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2075#endif
2076 }
2077 else
2078 {
2079 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2080 u64GuestCr0 |= X86_CR0_WP;
2081 }
2082
2083 /*
2084 * Guest FPU bits.
2085 *
2086              * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2087 * using CR0.TS.
2088 *
2089 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
2090              * set on the first CPUs to support VT-x, and the VM-entry checks make no mention of it with regards to UX.
2091 */
2092 u64GuestCr0 |= X86_CR0_NE;
2093
2094 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2095 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2096
2097 /*
2098 * Update exception intercepts.
2099 */
2100 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2101#ifndef IN_NEM_DARWIN
2102 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2103 {
2104 Assert(PDMVmmDevHeapIsEnabled(pVM));
2105 Assert(pVM->hm.s.vmx.pRealModeTSS);
2106 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2107 }
2108 else
2109#endif
2110 {
2111 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2112 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2113 if (fInterceptMF)
2114 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2115 }
2116
2117 /* Additional intercepts for debugging, define these yourself explicitly. */
2118#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2119 uXcptBitmap |= 0
2120 | RT_BIT(X86_XCPT_BP)
2121 | RT_BIT(X86_XCPT_DE)
2122 | RT_BIT(X86_XCPT_NM)
2123 | RT_BIT(X86_XCPT_TS)
2124 | RT_BIT(X86_XCPT_UD)
2125 | RT_BIT(X86_XCPT_NP)
2126 | RT_BIT(X86_XCPT_SS)
2127 | RT_BIT(X86_XCPT_GP)
2128 | RT_BIT(X86_XCPT_PF)
2129 | RT_BIT(X86_XCPT_MF)
2130 ;
2131#elif defined(HMVMX_ALWAYS_TRAP_PF)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2133#endif
2134 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2136 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2137 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2138 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2139
2140 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2141 u64GuestCr0 |= fSetCr0;
2142 u64GuestCr0 &= fZapCr0;
2143 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177 Assert(!RT_HI_U32(u64GuestCr0));
2178 Assert(u64GuestCr0 & X86_CR0_NE);
2179
2180 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2181 u64GuestCr0 |= fSetCr0;
2182 u64GuestCr0 &= fZapCr0;
2183 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2184
2185 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2186 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2187 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2188
2189 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2190 }
2191
2192 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2193 }
2194
2195 return VINF_SUCCESS;
2196}
2197
2198
2199/**
2200 * Exports the guest control registers (CR3, CR4) into the guest-state area
2201 * in the VMCS.
2202 *
2203 * @returns VBox strict status code.
2204 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2205 * without unrestricted guest access and the VMMDev is not presently
2206 * mapped (e.g. EFI32).
2207 *
2208 * @param pVCpu The cross context virtual CPU structure.
2209 * @param pVmxTransient The VMX-transient structure.
2210 *
2211 * @remarks No-long-jump zone!!!
2212 */
2213static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2214{
2215 int rc = VINF_SUCCESS;
2216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2217
2218 /*
2219 * Guest CR2.
2220 * It's always loaded in the assembler code. Nothing to do here.
2221 */
2222
2223 /*
2224 * Guest CR3.
2225 */
2226 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2227 {
2228 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2229
2230 if (VM_IS_VMX_NESTED_PAGING(pVM))
2231 {
2232#ifndef IN_NEM_DARWIN
2233 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2234 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2235
2236 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2237 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2238 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2239 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2240
2241 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2242 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2243 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2244
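            /* Worked example (made-up address): with the PML4 table at host-physical
               0x12345000 the resulting EPTP is 0x1234501e, i.e. memory type 6 (WB) in
               bits 2:0, page-walk length minus one (3) in bits 5:3 and the A/D-enable
               bit 6 left clear here. */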
2245 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2246 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2247 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2248 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2249 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2250 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2251 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252
2253 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2254 AssertRC(rc);
2255#endif
2256
2257 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2258 uint64_t u64GuestCr3 = pCtx->cr3;
2259 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2260 || CPUMIsGuestPagingEnabledEx(pCtx))
2261 {
2262 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2263 if (CPUMIsGuestInPAEModeEx(pCtx))
2264 {
2265 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2266 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2269 }
2270
2271 /*
2272 * The guest's view of its CR3 is unblemished with nested paging when the
2273 * guest is using paging or we have unrestricted guest execution to handle
2274 * the guest when it's not using paging.
2275 */
2276 }
2277#ifndef IN_NEM_DARWIN
2278 else
2279 {
2280 /*
2281 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2282 * thinks it accesses physical memory directly, we use our identity-mapped
2283 * page table to map guest-linear to guest-physical addresses. EPT takes care
2284 * of translating it to host-physical addresses.
2285 */
2286 RTGCPHYS GCPhys;
2287 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2288
2289 /* We obtain it here every time as the guest could have relocated this PCI region. */
2290 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2291 if (RT_SUCCESS(rc))
2292 { /* likely */ }
2293 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2294 {
2295 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2296 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2297 }
2298 else
2299 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2300
2301 u64GuestCr3 = GCPhys;
2302 }
2303#endif
2304
2305 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2306 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2307 AssertRC(rc);
2308 }
2309 else
2310 {
2311 Assert(!pVmxTransient->fIsNestedGuest);
2312 /* Non-nested paging case, just use the hypervisor's CR3. */
2313 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2314
2315 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2317 AssertRC(rc);
2318 }
2319
2320 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2321 }
2322
2323 /*
2324 * Guest CR4.
2325      * ASSUMES this is done every time we get in from ring-3! (XCR0)
2326 */
2327 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2328 {
2329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2330 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2331
2332 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2333 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2334
2335 /*
2336 * With nested-guests, we may have extended the guest/host mask here (since we
2337 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2338 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2339 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2340 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2341 */
2342 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2343 uint64_t u64GuestCr4 = pCtx->cr4;
2344 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2345 ? pCtx->cr4
2346 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2347 Assert(!RT_HI_U32(u64GuestCr4));
2348
2349#ifndef IN_NEM_DARWIN
2350 /*
2351 * Setup VT-x's view of the guest CR4.
2352 *
2353 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2354 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2355 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2356 *
2357 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2358 */
2359 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2360 {
2361 Assert(pVM->hm.s.vmx.pRealModeTSS);
2362 Assert(PDMVmmDevHeapIsEnabled(pVM));
2363 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2364 }
2365#endif
2366
2367 if (VM_IS_VMX_NESTED_PAGING(pVM))
2368 {
2369 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2370 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2371 {
2372 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2373 u64GuestCr4 |= X86_CR4_PSE;
2374 /* Our identity mapping is a 32-bit page directory. */
2375 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2376 }
2377 /* else use guest CR4.*/
2378 }
2379 else
2380 {
2381 Assert(!pVmxTransient->fIsNestedGuest);
2382
2383 /*
2384 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2385 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2386 */
2387 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2388 {
2389 case PGMMODE_REAL: /* Real-mode. */
2390 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2391 case PGMMODE_32_BIT: /* 32-bit paging. */
2392 {
2393 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2394 break;
2395 }
2396
2397 case PGMMODE_PAE: /* PAE paging. */
2398 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2399 {
2400 u64GuestCr4 |= X86_CR4_PAE;
2401 break;
2402 }
2403
2404 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2405 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2406 {
2407#ifdef VBOX_WITH_64_BITS_GUESTS
2408 /* For our assumption in vmxHCShouldSwapEferMsr. */
2409 Assert(u64GuestCr4 & X86_CR4_PAE);
2410 break;
2411#endif
2412 }
2413 default:
2414 AssertFailed();
2415 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2416 }
2417 }
2418
2419 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2420 u64GuestCr4 |= fSetCr4;
2421 u64GuestCr4 &= fZapCr4;
2422
2423 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2424 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2425 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2426
2427#ifndef IN_NEM_DARWIN
2428 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2429 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2430 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2431 {
2432 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2433 hmR0VmxUpdateStartVmFunction(pVCpu);
2434 }
2435#endif
2436
2437 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2438
2439 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2440 }
2441 return rc;
2442}
2443
2444
2445#ifdef VBOX_STRICT
2446/**
2447 * Strict function to validate segment registers.
2448 *
2449 * @param pVCpu The cross context virtual CPU structure.
2450 * @param pVmcsInfo The VMCS info. object.
2451 *
2452 * @remarks Will import guest CR0 on strict builds during validation of
2453 * segments.
2454 */
2455static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2456{
2457 /*
2458 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2459 *
2460 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2461 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2462 * unusable bit and doesn't change the guest-context value.
2463 */
2464 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2465 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2466 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2467 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2468 && ( !CPUMIsGuestInRealModeEx(pCtx)
2469 && !CPUMIsGuestInV86ModeEx(pCtx)))
2470 {
2471 /* Protected mode checks */
2472 /* CS */
2473 Assert(pCtx->cs.Attr.n.u1Present);
2474 Assert(!(pCtx->cs.Attr.u & 0xf00));
2475 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2476 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2477 || !(pCtx->cs.Attr.n.u1Granularity));
2478 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2479 || (pCtx->cs.Attr.n.u1Granularity));
2480 /* CS cannot be loaded with NULL in protected mode. */
2481 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2482 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2483 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2484 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2485 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2486 else
2487             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2488 /* SS */
2489 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2490 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2491 if ( !(pCtx->cr0 & X86_CR0_PE)
2492 || pCtx->cs.Attr.n.u4Type == 3)
2493 {
2494 Assert(!pCtx->ss.Attr.n.u2Dpl);
2495 }
2496 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2497 {
2498 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2499 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2500 Assert(pCtx->ss.Attr.n.u1Present);
2501 Assert(!(pCtx->ss.Attr.u & 0xf00));
2502 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2503 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2504 || !(pCtx->ss.Attr.n.u1Granularity));
2505 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2506 || (pCtx->ss.Attr.n.u1Granularity));
2507 }
2508 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2509 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2510 {
2511 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2512 Assert(pCtx->ds.Attr.n.u1Present);
2513 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2514 Assert(!(pCtx->ds.Attr.u & 0xf00));
2515 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2516 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2517 || !(pCtx->ds.Attr.n.u1Granularity));
2518 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2519 || (pCtx->ds.Attr.n.u1Granularity));
2520 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2521 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2522 }
2523 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2524 {
2525 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2526 Assert(pCtx->es.Attr.n.u1Present);
2527 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2528 Assert(!(pCtx->es.Attr.u & 0xf00));
2529 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2530 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2531 || !(pCtx->es.Attr.n.u1Granularity));
2532 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2533 || (pCtx->es.Attr.n.u1Granularity));
2534 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2535 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2536 }
2537 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2538 {
2539 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2540 Assert(pCtx->fs.Attr.n.u1Present);
2541 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2542 Assert(!(pCtx->fs.Attr.u & 0xf00));
2543 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2544 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2545 || !(pCtx->fs.Attr.n.u1Granularity));
2546 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2547 || (pCtx->fs.Attr.n.u1Granularity));
2548 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2549 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2550 }
2551 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2552 {
2553 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2554 Assert(pCtx->gs.Attr.n.u1Present);
2555 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2556 Assert(!(pCtx->gs.Attr.u & 0xf00));
2557 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2558 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2559 || !(pCtx->gs.Attr.n.u1Granularity));
2560 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2561 || (pCtx->gs.Attr.n.u1Granularity));
2562 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2563 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2564 }
2565 /* 64-bit capable CPUs. */
2566 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2567 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2568 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2569 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2570 }
2571 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2572 || ( CPUMIsGuestInRealModeEx(pCtx)
2573 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2574 {
2575 /* Real and v86 mode checks. */
2576         /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2577 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2578#ifndef IN_NEM_DARWIN
2579 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2580 {
2581 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2582 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
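            /* 0xf3 decodes to an accessed read/write data segment with S=1, DPL=3 and P=1,
               which is what the VMX guest-segment checks require while RFLAGS.VM=1. */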
2583 }
2584 else
2585#endif
2586 {
2587 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2588 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2589 }
2590
2591 /* CS */
2592 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2593 Assert(pCtx->cs.u32Limit == 0xffff);
2594 Assert(u32CSAttr == 0xf3);
2595 /* SS */
2596 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2597 Assert(pCtx->ss.u32Limit == 0xffff);
2598 Assert(u32SSAttr == 0xf3);
2599 /* DS */
2600 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2601 Assert(pCtx->ds.u32Limit == 0xffff);
2602 Assert(u32DSAttr == 0xf3);
2603 /* ES */
2604 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2605 Assert(pCtx->es.u32Limit == 0xffff);
2606 Assert(u32ESAttr == 0xf3);
2607 /* FS */
2608 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2609 Assert(pCtx->fs.u32Limit == 0xffff);
2610 Assert(u32FSAttr == 0xf3);
2611 /* GS */
2612 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2613 Assert(pCtx->gs.u32Limit == 0xffff);
2614 Assert(u32GSAttr == 0xf3);
2615 /* 64-bit capable CPUs. */
2616 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2617 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2618 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2619 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2620 }
2621}
2622#endif /* VBOX_STRICT */
2623
2624
2625/**
2626 * Exports a guest segment register into the guest-state area in the VMCS.
2627 *
2628 * @returns VBox status code.
2629 * @param pVCpu The cross context virtual CPU structure.
2630 * @param pVmcsInfo The VMCS info. object.
2631 * @param iSegReg The segment register number (X86_SREG_XXX).
2632 * @param pSelReg Pointer to the segment selector.
2633 *
2634 * @remarks No-long-jump zone!!!
2635 */
2636static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2637{
2638 Assert(iSegReg < X86_SREG_COUNT);
2639
2640 uint32_t u32Access = pSelReg->Attr.u;
2641#ifndef IN_NEM_DARWIN
2642 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2643#endif
2644 {
2645 /*
2646 * The way to differentiate between whether this is really a null selector or was just
2647 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2648 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2649          * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2650 * NULL selectors loaded in protected-mode have their attribute as 0.
2651 */
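        /* Example (illustrative): a real-mode "mov es, ax" with AX=0 leaves ES with non-zero
           attributes and it remains usable after switching to protected mode, whereas a NULL
           ES loaded in protected mode has Attr.u == 0 and is flagged unusable below. */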
2652 if (u32Access)
2653 { }
2654 else
2655 u32Access = X86DESCATTR_UNUSABLE;
2656 }
2657#ifndef IN_NEM_DARWIN
2658 else
2659 {
2660 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2661 u32Access = 0xf3;
2662 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2663 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2664 RT_NOREF_PV(pVCpu);
2665 }
2666#else
2667 RT_NOREF(pVmcsInfo);
2668#endif
2669
2670 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2671 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2672               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2673
2674 /*
2675 * Commit it to the VMCS.
2676 */
2677 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2678 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2679 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2680 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2681 return VINF_SUCCESS;
2682}
2683
2684
2685/**
2686 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2687 * area in the VMCS.
2688 *
2689 * @returns VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure.
2691 * @param pVmxTransient The VMX-transient structure.
2692 *
2693 * @remarks Will import guest CR0 on strict builds during validation of
2694 * segments.
2695 * @remarks No-long-jump zone!!!
2696 */
2697static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2698{
2699 int rc = VERR_INTERNAL_ERROR_5;
2700#ifndef IN_NEM_DARWIN
2701 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2702#endif
2703 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2704 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2705#ifndef IN_NEM_DARWIN
2706 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2707#endif
2708
2709 /*
2710 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2711 */
2712 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2713 {
2714 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2715 {
2716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2717#ifndef IN_NEM_DARWIN
2718 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2719 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2720#endif
2721 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2722 AssertRC(rc);
2723 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2724 }
2725
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2727 {
2728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2729#ifndef IN_NEM_DARWIN
2730 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2731 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2732#endif
2733 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2734 AssertRC(rc);
2735 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2736 }
2737
2738 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2739 {
2740 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2741#ifndef IN_NEM_DARWIN
2742 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2743 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2744#endif
2745 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2746 AssertRC(rc);
2747 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2748 }
2749
2750 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2751 {
2752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2753#ifndef IN_NEM_DARWIN
2754 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2755 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2756#endif
2757 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2758 AssertRC(rc);
2759 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2760 }
2761
2762 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2763 {
2764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2765#ifndef IN_NEM_DARWIN
2766 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2767 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2768#endif
2769 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2770 AssertRC(rc);
2771 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2772 }
2773
2774 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2775 {
2776 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2777#ifndef IN_NEM_DARWIN
2778 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2779 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2780#endif
2781 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2782 AssertRC(rc);
2783 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2784 }
2785
2786#ifdef VBOX_STRICT
2787 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2788#endif
2789 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2790 pCtx->cs.Attr.u));
2791 }
2792
2793 /*
2794 * Guest TR.
2795 */
2796 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2797 {
2798 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2799
2800 /*
2801 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2802 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2803 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2804 */
2805 uint16_t u16Sel;
2806 uint32_t u32Limit;
2807 uint64_t u64Base;
2808 uint32_t u32AccessRights;
2809#ifndef IN_NEM_DARWIN
2810 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2811#endif
2812 {
2813 u16Sel = pCtx->tr.Sel;
2814 u32Limit = pCtx->tr.u32Limit;
2815 u64Base = pCtx->tr.u64Base;
2816 u32AccessRights = pCtx->tr.Attr.u;
2817 }
2818#ifndef IN_NEM_DARWIN
2819 else
2820 {
2821 Assert(!pVmxTransient->fIsNestedGuest);
2822 Assert(pVM->hm.s.vmx.pRealModeTSS);
2823 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2824
2825 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2826 RTGCPHYS GCPhys;
2827 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2828 AssertRCReturn(rc, rc);
2829
2830 X86DESCATTR DescAttr;
2831 DescAttr.u = 0;
2832 DescAttr.n.u1Present = 1;
2833 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2834
2835 u16Sel = 0;
2836 u32Limit = HM_VTX_TSS_SIZE;
2837 u64Base = GCPhys;
2838 u32AccessRights = DescAttr.u;
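            /* With only Present and the busy 32-bit TSS type set, DescAttr.u works out to
               0x8b, which is exactly what the "TSS is not busy!?" assertion below accepts. */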
2839 }
2840#endif
2841
2842 /* Validate. */
2843 Assert(!(u16Sel & RT_BIT(2)));
2844 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2845 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2846 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2847 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2848 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2849 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2850 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2851 Assert( (u32Limit & 0xfff) == 0xfff
2852 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2853 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2854 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2855
2856 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2857 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2858 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2859 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2860
2861 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2862 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2863 }
2864
2865 /*
2866 * Guest GDTR.
2867 */
2868 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2869 {
2870 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2871
2872 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2874
2875 /* Validate. */
2876 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2877
2878 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2879 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2880 }
2881
2882 /*
2883 * Guest LDTR.
2884 */
2885 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2886 {
2887 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2888
2889 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2890 uint32_t u32Access;
2891 if ( !pVmxTransient->fIsNestedGuest
2892 && !pCtx->ldtr.Attr.u)
2893 u32Access = X86DESCATTR_UNUSABLE;
2894 else
2895 u32Access = pCtx->ldtr.Attr.u;
2896
2897 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2898 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2899 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2900 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2901
2902 /* Validate. */
2903 if (!(u32Access & X86DESCATTR_UNUSABLE))
2904 {
2905 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2906 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2907 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2908 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2909 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2910 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2911 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2912 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2913 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2914 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2915 }
2916
2917 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2918 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2919 }
2920
2921 /*
2922 * Guest IDTR.
2923 */
2924 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2925 {
2926 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2927
2928 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2929 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2930
2931 /* Validate. */
2932 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2933
2934 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2935 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2936 }
2937
2938 return VINF_SUCCESS;
2939}
2940
2941
2942/**
2943 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2944 * VM-exit interruption info type.
2945 *
2946 * @returns The IEM exception flags.
2947 * @param uVector The event vector.
2948 * @param uVmxEventType The VMX event type.
2949 *
2950 * @remarks This function currently only constructs flags required for
2951 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2952 * and CR2 aspects of an exception are not included).
2953 */
2954static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2955{
2956 uint32_t fIemXcptFlags;
2957 switch (uVmxEventType)
2958 {
2959 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2960 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2969 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2970 break;
2971
2972 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2973 {
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2975 if (uVector == X86_XCPT_BP)
2976 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2977 else if (uVector == X86_XCPT_OF)
2978 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2979 else
2980 {
2981 fIemXcptFlags = 0;
2982 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2983 }
2984 break;
2985 }
2986
2987 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2989 break;
2990
2991 default:
2992 fIemXcptFlags = 0;
2993 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2994 break;
2995 }
2996 return fIemXcptFlags;
2997}
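
/*
 * Example (illustrative sketch, not built): one way the flags above could be fed to
 * IEMEvaluateRecursiveXcpt when deciding whether an IDT-vectoring event and a VM-exit
 * exception collapse into a #DF.  The uIdtVector/uExitVector locals and the exact
 * IEMEvaluateRecursiveXcpt signature are assumptions for illustration only.
 */
#if 0
    uint32_t const   fIdtVectorFlags  = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
    uint32_t const   fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector,
                                                           fExitVectorFlags, uExitVector, &fRaiseInfo);
    if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
        vmxHCSetPendingXcptDF(pVCpu);
#endif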
2998
2999
3000/**
3001 * Sets an event as a pending event to be injected into the guest.
3002 *
3003 * @param pVCpu The cross context virtual CPU structure.
3004 * @param u32IntInfo The VM-entry interruption-information field.
3005 * @param cbInstr The VM-entry instruction length in bytes (for
3006 * software interrupts, exceptions and privileged
3007 * software exceptions).
3008 * @param u32ErrCode The VM-entry exception error code.
3009 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3010 * page-fault.
3011 */
3012DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3013 RTGCUINTPTR GCPtrFaultAddress)
3014{
3015 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3016 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3017 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3018 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3019 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3020 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3021}
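
/*
 * Example (illustrative sketch, not built): queuing a guest page-fault follows the same
 * pattern as the helpers below, additionally passing the error code and the fault address
 * (CR2).  The uErrCode and GCPtrFaultAddress locals are placeholders for illustration.
 */
#if 0
    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
#endif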
3022
3023
3024/**
3025 * Sets an external interrupt as pending-for-injection into the VM.
3026 *
3027 * @param pVCpu The cross context virtual CPU structure.
3028 * @param u8Interrupt The external interrupt vector.
3029 */
3030DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3031{
3032    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, u8Interrupt)
3033 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3034 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3035 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3036 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3037}
3038
3039
3040/**
3041 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3042 *
3043 * @param pVCpu The cross context virtual CPU structure.
3044 */
3045DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3046{
3047 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3048 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3050 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3051 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3052}
3053
3054
3055/**
3056 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3057 *
3058 * @param pVCpu The cross context virtual CPU structure.
3059 */
3060DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3061{
3062 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3066 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3067}
3068
3069
3070/**
3071 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3072 *
3073 * @param pVCpu The cross context virtual CPU structure.
3074 */
3075DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3076{
3077 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3081 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3082}
3083
3084
3085/**
3086 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3087 *
3088 * @param pVCpu The cross context virtual CPU structure.
3089 */
3090DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3091{
3092 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3096 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3097}
3098
3099
3100#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3101/**
3102 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3103 *
3104 * @param pVCpu The cross context virtual CPU structure.
3105 * @param u32ErrCode The error code for the general-protection exception.
3106 */
3107DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3108{
3109 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3112 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3113 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3114}
3115
3116
3117/**
3118 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3119 *
3120 * @param pVCpu The cross context virtual CPU structure.
3121 * @param u32ErrCode The error code for the stack exception.
3122 */
3123DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3124{
3125 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3129 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3130}
3131#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3132
3133
3134/**
3135 * Fixes up attributes for the specified segment register.
3136 *
3137 * @param pVCpu The cross context virtual CPU structure.
3138 * @param pSelReg The segment register that needs fixing.
3139 * @param pszRegName The register name (for logging and assertions).
3140 */
3141static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3142{
3143 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3144
3145 /*
3146 * If VT-x marks the segment as unusable, most other bits remain undefined:
3147 * - For CS the L, D and G bits have meaning.
3148 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3149 * - For the remaining data segments no bits are defined.
3150 *
3151     * The present bit and the unusable bit have been observed to be set at the
3152     * same time (the selector was supposed to be invalid as we started executing
3153     * a V8086 interrupt in ring-0).
3154     *
3155     * What is important for the rest of the VBox code is that the P bit is
3156     * cleared. Some of the other VBox code recognizes the unusable bit, but
3157     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3158 * safe side here, we'll strip off P and other bits we don't care about. If
3159 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3160 *
3161 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3162 */
3163#ifdef VBOX_STRICT
3164 uint32_t const uAttr = pSelReg->Attr.u;
3165#endif
3166
3167 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3168 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3169 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3170
3171#ifdef VBOX_STRICT
3172# ifndef IN_NEM_DARWIN
3173 VMMRZCallRing3Disable(pVCpu);
3174# endif
3175 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3176# ifdef DEBUG_bird
3177 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3178 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3179 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3180# endif
3181# ifndef IN_NEM_DARWIN
3182 VMMRZCallRing3Enable(pVCpu);
3183# endif
3184 NOREF(uAttr);
3185#endif
3186 RT_NOREF2(pVCpu, pszRegName);
3187}
3188
3189
3190/**
3191 * Imports a guest segment register from the current VMCS into the guest-CPU
3192 * context.
3193 *
3194 * @param pVCpu The cross context virtual CPU structure.
3195 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3196 *
3197 * @remarks Called with interrupts and/or preemption disabled.
3198 */
3199template<uint32_t const a_iSegReg>
3200DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3201{
3202 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3203    /* Check that the macros we depend upon here and in the export counterpart function work: */
3204#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3205 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3207 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3208 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3209 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3210 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3211 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3212 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3213 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3214 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3215
3216 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3217
3218 uint16_t u16Sel;
3219 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3220 pSelReg->Sel = u16Sel;
3221 pSelReg->ValidSel = u16Sel;
3222
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3224 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3225
3226 uint32_t u32Attr;
3227 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3228 pSelReg->Attr.u = u32Attr;
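    /* Note: the string literal below packs the six names as consecutive 2-char strings
       ("ES\0" "CS\0" ...), so offsetting it by a_iSegReg * 3 yields the NUL-terminated
       name of the register being fixed up (for logging and assertions). */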
3229 if (u32Attr & X86DESCATTR_UNUSABLE)
3230 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3231
3232 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3233}
3234
3235
3236/**
3237 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3238 *
3239 * @param pVCpu The cross context virtual CPU structure.
3240 *
3241 * @remarks Called with interrupts and/or preemption disabled.
3242 */
3243DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3244{
3245 uint16_t u16Sel;
3246 uint64_t u64Base;
3247 uint32_t u32Limit, u32Attr;
3248 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3251 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3252
3253 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3254 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3255 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3257 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3258 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3259 if (u32Attr & X86DESCATTR_UNUSABLE)
3260 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3261}
3262
3263
3264/**
3265 * Imports the guest TR from the current VMCS into the guest-CPU context.
3266 *
3267 * @param pVCpu The cross context virtual CPU structure.
3268 *
3269 * @remarks Called with interrupts and/or preemption disabled.
3270 */
3271DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3272{
3273 uint16_t u16Sel;
3274 uint64_t u64Base;
3275 uint32_t u32Limit, u32Attr;
3276 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3277 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3278 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3279 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3280
3281 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3282 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3283 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3284 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3285 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3286 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3287 /* TR is the only selector that can never be unusable. */
3288 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3289}
3290
3291
3292/**
3293 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3294 *
3295 * @param pVCpu The cross context virtual CPU structure.
3296 *
3297 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3298 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3299 * instead!!!
3300 */
3301DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3302{
3303 uint64_t u64Val;
3304 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3305 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3306 {
3307 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3308 AssertRC(rc);
3309
3310 pCtx->rip = u64Val;
3311 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3312 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3313 }
3314}
3315
3316
3317/**
3318 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3319 *
3320 * @param pVCpu The cross context virtual CPU structure.
3321 * @param pVmcsInfo The VMCS info. object.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3328{
3329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3330 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3331 {
3332 uint64_t u64Val;
3333 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3334 AssertRC(rc);
3335
3336 pCtx->rflags.u64 = u64Val;
3337#ifndef IN_NEM_DARWIN
3338 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3339 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3340 {
3341 pCtx->eflags.Bits.u1VM = 0;
3342 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3343 }
3344#else
3345 RT_NOREF(pVmcsInfo);
3346#endif
3347 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3348 }
3349}
3350
3351
3352/**
3353 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3354 * context.
3355 *
3356 * @note    May import RIP and RFLAGS if interrupts or NMIs are blocked.
3357 *
3358 * @param pVCpu The cross context virtual CPU structure.
3359 * @param pVmcsInfo The VMCS info. object.
3360 *
3361 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3362 * do not log!
3363 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3364 * instead!!!
3365 */
3366DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3367{
3368 uint32_t u32Val;
3369 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3370 if (!u32Val)
3371 {
3372 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3373 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3374/** @todo r=bird: This is a call which isn't necessary most of the time, yet this
3375 * path is taken on basically all exits.  Try to find a way to eliminate it. */
3376 CPUMSetGuestNmiBlocking(pVCpu, false);
3377 }
3378 else
3379 {
3380/** @todo consider this branch for non-inlining. */
3381 /*
3382 * We must import RIP here to set our EM interrupt-inhibited state.
3383 * We also import RFLAGS as our code that evaluates pending interrupts
3384 * before VM-entry requires it.
3385 */
3386 vmxHCImportGuestRip(pVCpu);
3387 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3388
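        /* Both STI-blocking and MOV SS/POP SS-blocking inhibit interrupts for exactly one
           instruction, so they are folded into EM's single RIP-based inhibit below, while
           NMI blocking is tracked separately via CPUM. */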
3389 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3390 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3391 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3392 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3393
3394 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3395 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3396 }
3397}
3398
3399
3400/**
3401 * Worker for VMXR0ImportStateOnDemand.
3402 *
3403 * @returns VBox status code.
3404 * @param pVCpu The cross context virtual CPU structure.
3405 * @param pVmcsInfo The VMCS info. object.
3406 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3407 */
3408static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3409{
3410 int rc = VINF_SUCCESS;
3411 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3412 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3413 uint32_t u32Val;
3414
3415 /*
3416     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3417     *       on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3418     *       neither are other host platforms.
3419     *
3420     *       Committing this temporarily as it prevents the BSOD.
3421 *
3422 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3423 */
3424#ifdef RT_OS_WINDOWS
3425 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3426 return VERR_HM_IPE_1;
3427#endif
3428
3429 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3430
3431#ifndef IN_NEM_DARWIN
3432 /*
3433 * We disable interrupts to make the updating of the state and in particular
3434     * the fExtrn modification atomic with respect to preemption hooks.
3435 */
3436 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3437#endif
3438
3439 fWhat &= pCtx->fExtrn;
3440 if (fWhat)
3441 {
3442 do
3443 {
3444 if (fWhat & CPUMCTX_EXTRN_RIP)
3445 vmxHCImportGuestRip(pVCpu);
3446
3447 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3448 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3449
3450 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3451 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3452 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3453
3454 if (fWhat & CPUMCTX_EXTRN_RSP)
3455 {
3456 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3457 AssertRC(rc);
3458 }
3459
3460 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3461 {
3462 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3463#ifndef IN_NEM_DARWIN
3464 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3465#else
3466 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3467#endif
3468 if (fWhat & CPUMCTX_EXTRN_CS)
3469 {
3470 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3471 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3472 if (fRealOnV86Active)
3473 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3474 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3475 }
3476 if (fWhat & CPUMCTX_EXTRN_SS)
3477 {
3478 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3479 if (fRealOnV86Active)
3480 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3481 }
3482 if (fWhat & CPUMCTX_EXTRN_DS)
3483 {
3484 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3485 if (fRealOnV86Active)
3486 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3487 }
3488 if (fWhat & CPUMCTX_EXTRN_ES)
3489 {
3490 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3491 if (fRealOnV86Active)
3492 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3493 }
3494 if (fWhat & CPUMCTX_EXTRN_FS)
3495 {
3496 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3497 if (fRealOnV86Active)
3498 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3499 }
3500 if (fWhat & CPUMCTX_EXTRN_GS)
3501 {
3502 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3503 if (fRealOnV86Active)
3504 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3505 }
3506 }
3507
3508 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3509 {
3510 if (fWhat & CPUMCTX_EXTRN_LDTR)
3511 vmxHCImportGuestLdtr(pVCpu);
3512
3513 if (fWhat & CPUMCTX_EXTRN_GDTR)
3514 {
3515 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3516 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3517 pCtx->gdtr.cbGdt = u32Val;
3518 }
3519
3520 /* Guest IDTR. */
3521 if (fWhat & CPUMCTX_EXTRN_IDTR)
3522 {
3523 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3524 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3525 pCtx->idtr.cbIdt = u32Val;
3526 }
3527
3528 /* Guest TR. */
3529 if (fWhat & CPUMCTX_EXTRN_TR)
3530 {
3531#ifndef IN_NEM_DARWIN
3532                /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR;
3533                   we don't need to import that one. */
3534 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3535#endif
3536 vmxHCImportGuestTr(pVCpu);
3537 }
3538 }
3539
3540 if (fWhat & CPUMCTX_EXTRN_DR7)
3541 {
3542#ifndef IN_NEM_DARWIN
3543 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3544#endif
3545 {
3546 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3547 AssertRC(rc);
3548 }
3549 }
3550
3551 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3552 {
3553 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3554 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3555 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3556 pCtx->SysEnter.cs = u32Val;
3557 }
3558
3559#ifndef IN_NEM_DARWIN
3560 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3561 {
3562 if ( pVM->hmr0.s.fAllow64BitGuests
3563 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3564 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3565 }
3566
3567 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3568 {
3569 if ( pVM->hmr0.s.fAllow64BitGuests
3570 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3571 {
3572 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3573 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3574 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3575 }
3576 }
3577
3578 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3579 {
3580 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3581 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3582 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3583 Assert(pMsrs);
3584 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3585 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3586 for (uint32_t i = 0; i < cMsrs; i++)
3587 {
3588 uint32_t const idMsr = pMsrs[i].u32Msr;
3589 switch (idMsr)
3590 {
3591 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3592 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3593 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3594 default:
3595 {
3596 uint32_t idxLbrMsr;
3597 if (VM_IS_VMX_LBR(pVM))
3598 {
3599 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3600 {
3601 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3602 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3603 break;
3604 }
3605 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3606 {
3607                                     Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3608 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3609 break;
3610 }
3611 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3612 {
3613 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3614 break;
3615 }
3616 /* Fallthru (no break) */
3617 }
3618 pCtx->fExtrn = 0;
3619                    VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3620 ASMSetFlags(fEFlags);
3621 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3622 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3623 }
3624 }
3625 }
3626 }
3627#endif
3628
3629 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3630 {
3631 if (fWhat & CPUMCTX_EXTRN_CR0)
3632 {
3633 uint64_t u64Cr0;
3634 uint64_t u64Shadow;
3635 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3636 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3637#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3638 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3639 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3640#else
3641 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3642 {
3643 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3644 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3645 }
3646 else
3647 {
3648 /*
3649 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3650 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3651 * re-construct CR0. See @bugref{9180#c95} for details.
3652 */
3653 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3654 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3655 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3656 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3657 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3658 }
3659#endif
3660#ifndef IN_NEM_DARWIN
3661 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3662#endif
3663 CPUMSetGuestCR0(pVCpu, u64Cr0);
3664#ifndef IN_NEM_DARWIN
3665 VMMRZCallRing3Enable(pVCpu);
3666#endif
3667 }
3668
3669 if (fWhat & CPUMCTX_EXTRN_CR4)
3670 {
3671 uint64_t u64Cr4;
3672 uint64_t u64Shadow;
3673 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3674 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3675#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3676 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3677 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3678#else
3679 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3680 {
3681 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3682 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3683 }
3684 else
3685 {
3686 /*
3687 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3688 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3689 * re-construct CR4. See @bugref{9180#c95} for details.
3690 */
3691 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3692 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3693 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3694 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3695 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3696 }
3697#endif
3698 pCtx->cr4 = u64Cr4;
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_CR3)
3702 {
3703 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3704 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3705 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3706 && CPUMIsGuestPagingEnabledEx(pCtx)))
3707 {
3708 uint64_t u64Cr3;
3709 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3710 if (pCtx->cr3 != u64Cr3)
3711 {
3712 pCtx->cr3 = u64Cr3;
3713 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3714 }
3715
3716 /*
3717                 * If the guest is in PAE mode, sync back the PDPEs into the guest state.
3718 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3719 */
3720 if (CPUMIsGuestInPAEModeEx(pCtx))
3721 {
3722 X86PDPE aPaePdpes[4];
3723 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3724 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3725 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3726 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3727 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3728 {
3729 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3730 /* PGM now updates PAE PDPTEs while updating CR3. */
3731 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3732 }
3733 }
3734 }
3735 }
3736 }
3737
3738#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3739 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3740 {
3741 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3742 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3743 {
3744 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3745 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3746 if (RT_SUCCESS(rc))
3747 { /* likely */ }
3748 else
3749 break;
3750 }
3751 }
3752#endif
3753 } while (0);
3754
3755 if (RT_SUCCESS(rc))
3756 {
3757 /* Update fExtrn. */
3758 pCtx->fExtrn &= ~fWhat;
3759
3760 /* If everything has been imported, clear the HM keeper bit. */
3761 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3762 {
3763#ifndef IN_NEM_DARWIN
3764 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3765#else
3766 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3767#endif
3768 Assert(!pCtx->fExtrn);
3769 }
3770 }
3771 }
3772#ifndef IN_NEM_DARWIN
3773 else
3774 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3775
3776 /*
3777 * Restore interrupts.
3778 */
3779 ASMSetFlags(fEFlags);
3780#endif
3781
3782 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3783
3784 if (RT_SUCCESS(rc))
3785 { /* likely */ }
3786 else
3787 return rc;
3788
3789 /*
3790 * Honor any pending CR3 updates.
3791 *
3792 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3793 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3794 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3795 *
3796 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3797 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3798 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3799 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3800 *
3801 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3802 *
3803 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3804 */
3805 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3806#ifndef IN_NEM_DARWIN
3807 && VMMRZCallRing3IsEnabled(pVCpu)
3808#endif
3809 )
3810 {
3811 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3812 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3813 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3814 }
3815
3816 return VINF_SUCCESS;
3817}
3818
3819
3820/**
3821 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3822 *
3823 * @returns VBox status code.
3824 * @param pVCpu The cross context virtual CPU structure.
3825 * @param pVmcsInfo The VMCS info. object.
3826 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3827 * in NEM/darwin context.
3828 * @tparam a_fWhat What to import, zero or more bits from
3829 * HMVMX_CPUMCTX_EXTRN_ALL.
3830 */
3831template<uint64_t const a_fWhat>
3832static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3833{
3834 AssertCompile(a_fWhat != 0);
3835 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3836 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3837 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3838
3839    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3840
3841 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3842
3843    /* RIP and RFLAGS may have been imported already by the post-exit code
3844       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3845       this part of the code is skipped. */
3846 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3847 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3848 {
3849 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3850 vmxHCImportGuestRip(pVCpu);
3851
3852 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3853 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3854 }
3855
3856 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3857 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3858 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3859
3860 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3861 {
3862 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3863 AssertRC(rc);
3864 }
3865
3866 if (a_fWhat & CPUMCTX_EXTRN_SREG_MASK)
3867 {
3868 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3869#ifndef IN_NEM_DARWIN
3870 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3871#else
3872 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3873#endif
3874 if (a_fWhat & CPUMCTX_EXTRN_CS)
3875 {
3876 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3877 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3878 if (fRealOnV86Active)
3879 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3880 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3881 }
3882 if (a_fWhat & CPUMCTX_EXTRN_SS)
3883 {
3884 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3885 if (fRealOnV86Active)
3886 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3887 }
3888 if (a_fWhat & CPUMCTX_EXTRN_DS)
3889 {
3890 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3891 if (fRealOnV86Active)
3892 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3893 }
3894 if (a_fWhat & CPUMCTX_EXTRN_ES)
3895 {
3896 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3897 if (fRealOnV86Active)
3898 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3899 }
3900 if (a_fWhat & CPUMCTX_EXTRN_FS)
3901 {
3902 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3903 if (fRealOnV86Active)
3904 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3905 }
3906 if (a_fWhat & CPUMCTX_EXTRN_GS)
3907 {
3908 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3909 if (fRealOnV86Active)
3910 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3911 }
3912 }
3913
3914 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3915 vmxHCImportGuestLdtr(pVCpu);
3916
3917 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3918 {
3919 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3920 uint32_t u32Val;
3921 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3922 pVCpu->cpum.GstCtx.gdtr.cbGdt = u32Val;
3923 }
3924
3925 /* Guest IDTR. */
3926 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3927 {
3928 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3929 uint32_t u32Val;
3930 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3931 pVCpu->cpum.GstCtx.idtr.cbIdt = u32Val;
3932 }
3933
3934 /* Guest TR. */
3935 if (a_fWhat & CPUMCTX_EXTRN_TR)
3936 {
3937#ifndef IN_NEM_DARWIN
3938        /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR;
3939           we don't need to import that one. */
3940 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3941#endif
3942 vmxHCImportGuestTr(pVCpu);
3943 }
3944
3945 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3946 {
3947#ifndef IN_NEM_DARWIN
3948 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3949#endif
3950 {
3951 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3952 AssertRC(rc);
3953 }
3954 }
3955
3956 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3957 {
3958 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3959 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3960 uint32_t u32Val;
3961 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3962 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3963 }
3964
3965#ifndef IN_NEM_DARWIN
3966 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3967 {
3968 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
3969 && pVM->hmr0.s.fAllow64BitGuests)
3970 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3971 }
3972
3973 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3974 {
3975 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
3976 && pVM->hmr0.s.fAllow64BitGuests)
3977 {
3978 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3979 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
3980 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3981 }
3982 }
3983
3984 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3985 {
3986 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3987 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3988 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3989 Assert(pMsrs);
3990 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3991 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3992 for (uint32_t i = 0; i < cMsrs; i++)
3993 {
3994 uint32_t const idMsr = pMsrs[i].u32Msr;
3995 switch (idMsr)
3996 {
3997 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3998 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3999 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4000 default:
4001 {
4002 uint32_t idxLbrMsr;
4003 if (VM_IS_VMX_LBR(pVM))
4004 {
4005 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4006 {
4007 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4008 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4009 break;
4010 }
4011 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4012 {
4013                        Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4014 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4015 break;
4016 }
4017 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4018 {
4019 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4020 break;
4021 }
4022 }
4023 pVCpu->cpum.GstCtx.fExtrn = 0;
4024                    VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
4025 ASMSetFlags(fEFlags);
4026 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4027 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4028 }
4029 }
4030 }
4031 }
4032#endif
4033
4034 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4035 {
4036 uint64_t u64Cr0;
4037 uint64_t u64Shadow;
4038 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4039 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4040#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4041 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4042 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4043#else
4044 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4045 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4046 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4047 else
4048 {
4049 /*
4050 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4051 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4052 * re-construct CR0. See @bugref{9180#c95} for details.
4053 */
4054 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4055 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4056 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4057 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4058 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4059 }
4060#endif
4061#ifndef IN_NEM_DARWIN
4062 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4063#endif
4064 CPUMSetGuestCR0(pVCpu, u64Cr0);
4065#ifndef IN_NEM_DARWIN
4066 VMMRZCallRing3Enable(pVCpu);
4067#endif
4068 }
4069
4070 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4071 {
4072 uint64_t u64Cr4;
4073 uint64_t u64Shadow;
4074 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4075 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4076#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4077 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4078 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4079#else
4080 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4081 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4082 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4083 else
4084 {
4085 /*
4086 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4087 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4088 * re-construct CR4. See @bugref{9180#c95} for details.
4089 */
4090 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4091 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4092 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4093 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4094 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4095 }
4096#endif
4097 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4098 }
4099
4100 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4101 {
4102 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4103 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4104 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4105 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4106 {
4107 uint64_t u64Cr3;
4108 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4109 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4110 {
4111 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4112 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4113 }
4114
4115 /*
4116             * If the guest is in PAE mode, sync back the PDPEs into the guest state.
4117 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4118 */
4119 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4120 {
4121 X86PDPE aPaePdpes[4];
4122 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4123 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4124 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4125 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4126 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4127 {
4128 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4129 /* PGM now updates PAE PDPTEs while updating CR3. */
4130 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4131 }
4132 }
4133 }
4134 }
4135
4136#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4137 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4138 {
4139 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4140 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4141 {
4142 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4143 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4144 AssertRCReturn(rc, rc);
4145 }
4146 }
4147#endif
4148
4149 /* Update fExtrn. */
4150 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4151
4152 /* If everything has been imported, clear the HM keeper bit. */
4153 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4154 {
4155#ifndef IN_NEM_DARWIN
4156 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4157#else
4158 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4159#endif
4160 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4161 }
4162
4163 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4164
4165 /*
4166 * Honor any pending CR3 updates.
4167 *
4168 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4169 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4170 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4171 *
4172 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4173 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4174 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4175 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4176 *
4177 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4178 *
4179 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4180 */
4181#ifndef IN_NEM_DARWIN
4182 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4183 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4184 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4185 return VINF_SUCCESS;
4186 ASMSetFlags(fEFlags);
4187#else
4188 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4189 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4190 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4191 return VINF_SUCCESS;
4192#endif
4193
4194 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4195 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4196 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4197 return VINF_SUCCESS;
4198}
4199
4200
4201/**
4202 * Internal state fetcher.
4203 *
4204 * @returns VBox status code.
4205 * @param pVCpu The cross context virtual CPU structure.
4206 * @param pVmcsInfo The VMCS info. object.
4207 * @param pszCaller For logging.
4208 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4209 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4210 * already. This is ORed together with @a a_fWhat when
4211 * calculating what needs fetching (just for safety).
4212 * @tparam  a_fDonePostExit     What's ASSUMED to have been retrieved by
4213 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4214 * already. This is ORed together with @a a_fWhat when
4215 * calculating what needs fetching (just for safety).
4216 */
4217template<uint64_t const a_fWhat,
4218 uint64_t const a_fDoneLocal = 0,
4219 uint64_t const a_fDonePostExit = 0
4220#ifndef IN_NEM_DARWIN
4221 | CPUMCTX_EXTRN_INHIBIT_INT
4222 | CPUMCTX_EXTRN_INHIBIT_NMI
4223# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4224 | HMVMX_CPUMCTX_EXTRN_ALL
4225# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4226 | CPUMCTX_EXTRN_RFLAGS
4227# endif
4228#else /* IN_NEM_DARWIN */
4229 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4230#endif /* IN_NEM_DARWIN */
4231>
4232DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4233{
4234 RT_NOREF_PV(pszCaller);
4235 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4236 {
4237#ifndef IN_NEM_DARWIN
4238 /*
4239 * We disable interrupts to make the updating of the state and in particular
4240     * the fExtrn modification atomic with respect to preemption hooks.
4241 */
4242 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4243#else
4244 RTCCUINTREG const fEFlags = 0;
4245#endif
4246
4247 /*
4248 * We combine all three parameters and take the (probably) inlined optimized
4249 * code path for the new things specified in a_fWhat.
4250 *
4251 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4252 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4253 * also take the streamlined path when both of these are cleared in fExtrn
4254 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4255 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4256 */
4257 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4258 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4259 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4260 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4261 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */)
4262 && fWhatToDo != 0 /*possible when CPUMCTX_EXTRN_ALL is used post-exit*/))
4263 {
4264 int const rc = vmxHCImportGuestStateInner< a_fWhat
4265 & HMVMX_CPUMCTX_EXTRN_ALL
4266 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4267#ifndef IN_NEM_DARWIN
4268 ASMSetFlags(fEFlags);
4269#endif
4270 return rc;
4271 }
4272
4273#ifndef IN_NEM_DARWIN
4274 ASMSetFlags(fEFlags);
4275#endif
4276
4277 /*
4278 * We shouldn't normally get here, but it may happen when executing
4279 * in the debug run-loops. Typically, everything should already have
4280 * been fetched then. Otherwise call the fallback state import function.
4281 */
4282 if (fWhatToDo == 0)
4283 { /* hope the cause was the debug loop or something similar */ }
4284 else
4285 {
4286 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4287 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4288 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4289 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4290 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4291 }
4292 }
4293 return VINF_SUCCESS;
4294}
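
/*
 * Example (illustrative sketch, not built): a typical VM-exit handler fetches only what it
 * needs via the template, e.g. RIP and RFLAGS.  Passing __FUNCTION__ as the caller tag is an
 * assumption for illustration; real call sites may use other strings.
 */
#if 0
    int const rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);
#endif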
4295
4296
4297/**
4298 * Check per-VM and per-VCPU force flag actions that require us to go back to
4299 * ring-3 for one reason or another.
4300 *
4301 * @returns Strict VBox status code (i.e. informational status codes too)
4302 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4303 * ring-3.
4304 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4305 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4306 * interrupts)
4307 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4308 * all EMTs to be in ring-3.
4309 * @retval  VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4310 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4311 * to the EM loop.
4312 *
4313 * @param pVCpu The cross context virtual CPU structure.
4314 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4315 * @param fStepping Whether we are single-stepping the guest using the
4316 * hypervisor debugger.
4317 *
4318 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
4319 *          is no longer in VMX non-root mode.
4320 */
4321static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4322{
4323#ifndef IN_NEM_DARWIN
4324 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4325#endif
4326
4327 /*
4328 * Update pending interrupts into the APIC's IRR.
4329 */
4330 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4331 APICUpdatePendingInterrupts(pVCpu);
4332
4333 /*
4334 * Anything pending? Should be more likely than not if we're doing a good job.
4335 */
4336 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4337 if ( !fStepping
4338 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4339 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4340 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4341 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4342 return VINF_SUCCESS;
4343
4344    /* Pending PGM CR3 sync. */
4345    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4346 {
4347 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4348 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4349 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4350 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4351 if (rcStrict != VINF_SUCCESS)
4352 {
4353 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4354 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4355 return rcStrict;
4356 }
4357 }
4358
4359 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4360 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4361 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4362 {
4363 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4364 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4365 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4366 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4367 return rc;
4368 }
4369
4370 /* Pending VM request packets, such as hardware interrupts. */
4371 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4372 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4373 {
4374 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4375 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4376 return VINF_EM_PENDING_REQUEST;
4377 }
4378
4379 /* Pending PGM pool flushes. */
4380 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4381 {
4382 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4383 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4384 return VINF_PGM_POOL_FLUSH_PENDING;
4385 }
4386
4387 /* Pending DMA requests. */
4388 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4389 {
4390 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4391 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4392 return VINF_EM_RAW_TO_R3;
4393 }
4394
4395#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4396 /*
4397 * Pending nested-guest events.
4398 *
4399 * Please note that the priority of these events is specified and important.
4400 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4401 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4402 */
4403 if (fIsNestedGuest)
4404 {
4405 /* Pending nested-guest APIC-write. */
4406 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4407 {
4408 Log4Func(("Pending nested-guest APIC-write\n"));
4409 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4410 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4411 return rcStrict;
4412 }
4413
4414 /* Pending nested-guest monitor-trap flag (MTF). */
4415 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4416 {
4417 Log4Func(("Pending nested-guest MTF\n"));
4418 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4419 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4420 return rcStrict;
4421 }
4422
4423 /* Pending nested-guest VMX-preemption timer expired. */
4424 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4425 {
4426 Log4Func(("Pending nested-guest preempt timer\n"));
4427 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4428 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4429 return rcStrict;
4430 }
4431 }
4432#else
4433 NOREF(fIsNestedGuest);
4434#endif
4435
4436 return VINF_SUCCESS;
4437}
4438
4439
4440/**
4441 * Converts any TRPM trap into a pending HM event. This is typically used when
4442 * entering from ring-3 (not longjmp returns).
4443 *
4444 * @param pVCpu The cross context virtual CPU structure.
4445 */
4446static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4447{
4448 Assert(TRPMHasTrap(pVCpu));
4449 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4450
4451 uint8_t uVector;
4452 TRPMEVENT enmTrpmEvent;
4453 uint32_t uErrCode;
4454 RTGCUINTPTR GCPtrFaultAddress;
4455 uint8_t cbInstr;
4456 bool fIcebp;
4457
4458 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4459 AssertRC(rc);
4460
4461 uint32_t u32IntInfo;
4462 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4463 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
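    /* Note: the IDT-vectoring / interruption-information format places the vector in bits 7:0, the
       event type in bits 10:8, the error-code-valid flag in bit 11 and the valid flag in bit 31;
       HMTrpmEventTypeToVmxEventType() supplies the type bits here. */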
4464
4465 rc = TRPMResetTrap(pVCpu);
4466 AssertRC(rc);
4467 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4468 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4469
4470 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4471}
4472
4473
4474/**
4475 * Converts the pending HM event into a TRPM trap.
4476 *
4477 * @param pVCpu The cross context virtual CPU structure.
4478 */
4479static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4480{
4481 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4482
4483 /* If a trap was already pending, we did something wrong! */
4484 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4485
4486 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4487 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4488 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4489
4490 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4491
4492 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4493 AssertRC(rc);
4494
4495 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4496 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4497
4498 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4499 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4500 else
4501 {
4502 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4503 switch (uVectorType)
4504 {
4505 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4506 TRPMSetTrapDueToIcebp(pVCpu);
4507 RT_FALL_THRU();
4508 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4509 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4510 {
4511 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4512 || ( uVector == X86_XCPT_BP /* INT3 */
4513 || uVector == X86_XCPT_OF /* INTO */
4514 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4515 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4516 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4517 break;
4518 }
4519 }
4520 }
4521
4522 /* We're now done converting the pending event. */
4523 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4524}
4525
4526
4527/**
4528 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4529 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4530 *
4531 * @param pVCpu The cross context virtual CPU structure.
4532 * @param pVmcsInfo The VMCS info. object.
4533 */
4534static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4535{
4536 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4537 {
4538 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4539 {
4540 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4541 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4542 AssertRC(rc);
4543 }
4544 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4545}
4546
4547
4548/**
4549 * Clears the interrupt-window exiting control in the VMCS.
4550 *
4551 * @param pVCpu The cross context virtual CPU structure.
4552 * @param pVmcsInfo The VMCS info. object.
4553 */
4554DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4555{
4556 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4557 {
4558 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4559 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4560 AssertRC(rc);
4561 }
4562}
4563
4564
4565/**
4566 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4567 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4568 *
4569 * @param pVCpu The cross context virtual CPU structure.
4570 * @param pVmcsInfo The VMCS info. object.
4571 */
4572static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4573{
4574 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4575 {
4576 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4577 {
4578 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4579 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4580 AssertRC(rc);
4581 Log4Func(("Setup NMI-window exiting\n"));
4582 }
4583 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4584}
4585
4586
4587/**
4588 * Clears the NMI-window exiting control in the VMCS.
4589 *
4590 * @param pVCpu The cross context virtual CPU structure.
4591 * @param pVmcsInfo The VMCS info. object.
4592 */
4593DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4594{
4595 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4596 {
4597 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4598 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4599 AssertRC(rc);
4600 }
4601}
4602
4603
4604/**
4605 * Injects an event into the guest upon VM-entry by updating the relevant fields
4606 * in the VM-entry area in the VMCS.
4607 *
4608 * @returns Strict VBox status code (i.e. informational status codes too).
4609 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4610 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4611 *
4612 * @param pVCpu The cross context virtual CPU structure.
4613 * @param pVmcsInfo The VMCS info object.
4614 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4615 * @param pEvent The event being injected.
4616 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4617 * will be updated if necessary. This cannot not be NULL.
4618 * will be updated if necessary. This cannot be NULL.
4619 * return VINF_EM_DBG_STEPPED if the event is injected
4620 * directly (registers modified by us, not by hardware on
4621 * VM-entry).
4622 */
4623static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4624 bool fStepping, uint32_t *pfIntrState)
4625{
4626 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4627 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4628 Assert(pfIntrState);
4629
4630#ifdef IN_NEM_DARWIN
4631 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4632#endif
4633
4634 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4635 uint32_t u32IntInfo = pEvent->u64IntInfo;
4636 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4637 uint32_t const cbInstr = pEvent->cbInstr;
4638 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4639 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4640 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4641
4642#ifdef VBOX_STRICT
4643 /*
4644 * Validate the error-code-valid bit for hardware exceptions.
4645 * No error codes for exceptions in real-mode.
4646 *
4647 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4648 */
4649 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4650 && !CPUMIsGuestInRealModeEx(pCtx))
4651 {
4652 switch (uVector)
4653 {
4654 case X86_XCPT_PF:
4655 case X86_XCPT_DF:
4656 case X86_XCPT_TS:
4657 case X86_XCPT_NP:
4658 case X86_XCPT_SS:
4659 case X86_XCPT_GP:
4660 case X86_XCPT_AC:
4661 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4662 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4663 RT_FALL_THRU();
4664 default:
4665 break;
4666 }
4667 }
4668
4669 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4670 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4671 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4672#endif
4673
4674 RT_NOREF(uVector);
4675 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4676 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4677 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4678 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4679 {
4680 Assert(uVector <= X86_XCPT_LAST);
4681 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4682 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4683 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4684 }
4685 else
4686 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4687
4688 /*
4689 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4690 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4691 * interrupt handler in the (real-mode) guest.
4692 *
4693 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4694 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4695 */
4696 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4697 {
4698#ifndef IN_NEM_DARWIN
4699 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4700#endif
4701 {
4702 /*
4703 * For CPUs with unrestricted guest execution enabled and with the guest
4704 * in real-mode, we must not set the deliver-error-code bit.
4705 *
4706 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4707 */
4708 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4709 }
4710#ifndef IN_NEM_DARWIN
4711 else
4712 {
4713 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4714 Assert(PDMVmmDevHeapIsEnabled(pVM));
4715 Assert(pVM->hm.s.vmx.pRealModeTSS);
4716 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4717
4718 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4719 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4720 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4721 AssertRCReturn(rc2, rc2);
4722
4723 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4724 size_t const cbIdtEntry = sizeof(X86IDTR16);
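            /* A real-mode IVT entry is 4 bytes: a 16-bit handler offset followed by a 16-bit code
               segment selector, so entry N lives at IVT base + N * 4. */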
4725 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4726 {
4727 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4728 if (uVector == X86_XCPT_DF)
4729 return VINF_EM_RESET;
4730
4731 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4732 No error codes for exceptions in real-mode. */
4733 if (uVector == X86_XCPT_GP)
4734 {
4735 static HMEVENT const s_EventXcptDf
4736 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4737 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4738 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4739 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4740 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4741 }
4742
4743 /*
4744 * If we're injecting an event with no valid IDT entry, inject a #GP.
4745 * No error codes for exceptions in real-mode.
4746 *
4747 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4748 */
4749 static HMEVENT const s_EventXcptGp
4750 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4751 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4752 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4753 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4754 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4755 }
4756
4757 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4758 uint16_t uGuestIp = pCtx->ip;
4759 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4760 {
4761 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4762                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4763 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4764 }
4765 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4766 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4767
4768 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4769 X86IDTR16 IdtEntry;
4770 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4771 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4772 AssertRCReturn(rc2, rc2);
4773
4774 /* Construct the stack frame for the interrupt/exception handler. */
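            /* Real-mode interrupt dispatch pushes FLAGS, then CS, then IP (in that order), which is
               exactly what the three pushes below reproduce; see Intel spec. 20.1.4 "Interrupt and
               Exception Handling". */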
4775 VBOXSTRICTRC rcStrict;
4776 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4777 if (rcStrict == VINF_SUCCESS)
4778 {
4779 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4780 if (rcStrict == VINF_SUCCESS)
4781 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4782 }
4783
4784 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4785 if (rcStrict == VINF_SUCCESS)
4786 {
4787 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4788 pCtx->rip = IdtEntry.offSel;
4789 pCtx->cs.Sel = IdtEntry.uSel;
4790 pCtx->cs.ValidSel = IdtEntry.uSel;
4791 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
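                /* Note: cbIdtEntry is 4, so the shift above yields the usual real-mode base = selector << 4. */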
4792 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4793 && uVector == X86_XCPT_PF)
4794 pCtx->cr2 = GCPtrFault;
4795
4796 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4797 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4798 | HM_CHANGED_GUEST_RSP);
4799
4800 /*
4801 * If we delivered a hardware exception (other than an NMI) and if there was
4802 * block-by-STI in effect, we should clear it.
4803 */
4804 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4805 {
4806 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4807 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4808 Log4Func(("Clearing inhibition due to STI\n"));
4809 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4810 }
4811
4812 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4813 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4814
4815 /*
4816 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4817 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4818 */
4819 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4820
4821 /*
4822 * If we eventually support nested-guest execution without unrestricted guest execution,
4823 * we should set fInterceptEvents here.
4824 */
4825 Assert(!fIsNestedGuest);
4826
4827 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4828 if (fStepping)
4829 rcStrict = VINF_EM_DBG_STEPPED;
4830 }
4831 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4832 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4833 return rcStrict;
4834 }
4835#else
4836 RT_NOREF(pVmcsInfo);
4837#endif
4838 }
4839
4840 /*
4841 * Validate.
4842 */
4843 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4844 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4845
4846 /*
4847 * Inject the event into the VMCS.
4848 */
4849 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4850 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4851 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4852 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4853 AssertRC(rc);
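    /* Note: the CPU only consumes the VM-entry instruction length for software interrupts, software
       exceptions and privileged software exceptions; writing it for other event types is harmless. */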
4854
4855 /*
4856 * Update guest CR2 if this is a page-fault.
4857 */
4858 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4859 pCtx->cr2 = GCPtrFault;
4860
4861 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4862 return VINF_SUCCESS;
4863}
4864
4865
4866/**
4867 * Evaluates the event to be delivered to the guest and sets it as the pending
4868 * event.
4869 *
4870 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4871 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4872 * NOT restore these force-flags.
4873 *
4874 * @returns Strict VBox status code (i.e. informational status codes too).
4875 * @param pVCpu The cross context virtual CPU structure.
4876 * @param pVmcsInfo The VMCS information structure.
4877 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4878 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4879 */
4880static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4881{
4882 Assert(pfIntrState);
4883 Assert(!TRPMHasTrap(pVCpu));
4884
4885 /*
4886 * Compute/update guest-interruptibility state related FFs.
4887 * The FFs will be used below while evaluating events to be injected.
4888 */
4889 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4890
4891 /*
4892 * Evaluate if a new event needs to be injected.
4893 * For an event that's already pending, all the necessary checks have already been performed.
4894 */
4895 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4896 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4897 {
4898 /** @todo SMI. SMIs take priority over NMIs. */
4899
4900 /*
4901 * NMIs.
4902 * NMIs take priority over external interrupts.
4903 */
4904#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4905 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4906#endif
4907 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4908 {
4909 /*
4910 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4911 *
4912 * For a nested-guest, the FF always indicates the outer guest's ability to
4913 * receive an NMI while the guest-interruptibility state bit depends on whether
4914 * the nested-hypervisor is using virtual-NMIs.
4915 */
4916 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4917 {
4918#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4919 if ( fIsNestedGuest
4920 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4921 return IEMExecVmxVmexitXcptNmi(pVCpu);
4922#endif
4923 vmxHCSetPendingXcptNmi(pVCpu);
4924 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4925 Log4Func(("NMI pending injection\n"));
4926
4927 /* We've injected the NMI, bail. */
4928 return VINF_SUCCESS;
4929 }
4930 else if (!fIsNestedGuest)
4931 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4932 }
4933
4934 /*
4935 * External interrupts (PIC/APIC).
4936 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4937 * We cannot request the interrupt from the controller again.
4938 */
4939 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4940 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4941 {
4942 Assert(!DBGFIsStepping(pVCpu));
4943 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4944 AssertRC(rc);
4945
4946 /*
4947 * We must not check EFLAGS directly when executing a nested-guest; use
4948 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4949 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4950 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4951 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4952 *
4953 * See Intel spec. 25.4.1 "Event Blocking".
4954 */
4955 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4956 {
4957#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4958 if ( fIsNestedGuest
4959 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4960 {
4961 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4962 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4963 return rcStrict;
4964 }
4965#endif
4966 uint8_t u8Interrupt;
4967 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4968 if (RT_SUCCESS(rc))
4969 {
4970#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4971 if ( fIsNestedGuest
4972 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4973 {
4974 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4975 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4976 return rcStrict;
4977 }
4978#endif
4979 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4980 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4981 }
4982 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4983 {
4984 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4985
4986 if ( !fIsNestedGuest
4987 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4988 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4989 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4990
4991 /*
4992 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4993 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4994 * need to re-set this force-flag here.
4995 */
4996 }
4997 else
4998 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4999
5000 /* We've injected the interrupt or taken necessary action, bail. */
5001 return VINF_SUCCESS;
5002 }
5003 if (!fIsNestedGuest)
5004 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5005 }
5006 }
5007 else if (!fIsNestedGuest)
5008 {
5009 /*
5010 * An event is being injected or we are in an interrupt shadow. Check if another event is
5011 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5012 * the pending event.
5013 */
5014 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5015 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5016 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5017 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5018 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5019 }
5020 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5021
5022 return VINF_SUCCESS;
5023}
5024
5025
5026/**
5027 * Injects any pending events into the guest if the guest is in a state to
5028 * receive them.
5029 *
5030 * @returns Strict VBox status code (i.e. informational status codes too).
5031 * @param pVCpu The cross context virtual CPU structure.
5032 * @param pVmcsInfo The VMCS information structure.
5033 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5034 * @param fIntrState The VT-x guest-interruptibility state.
5035 * @param fStepping Whether we are single-stepping the guest using the
5036 * hypervisor debugger and should return
5037 * VINF_EM_DBG_STEPPED if the event was dispatched
5038 * directly.
5039 */
5040static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5041 uint32_t fIntrState, bool fStepping)
5042{
5043 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5044#ifndef IN_NEM_DARWIN
5045 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5046#endif
5047
5048#ifdef VBOX_STRICT
5049 /*
5050 * Verify guest-interruptibility state.
5051 *
5052 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5053 * since injecting an event may modify the interruptibility state and we must thus always
5054 * use fIntrState.
5055 */
5056 {
5057 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5058 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5059 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5060 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5061 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5062 Assert(!TRPMHasTrap(pVCpu));
5063 NOREF(fBlockMovSS); NOREF(fBlockSti);
5064 }
5065#endif
5066
5067 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5068 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5069 {
5070 /*
5071 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5072 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5073 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5074 *
5075 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5076 */
5077 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5078#ifdef VBOX_STRICT
5079 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5080 {
5081 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
5082 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5083 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5084 }
5085 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5086 {
5087 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5088 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5089 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5090 }
5091#endif
5092 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5093 uIntType));
5094
5095 /*
5096 * Inject the event and get any changes to the guest-interruptibility state.
5097 *
5098 * The guest-interruptibility state may need to be updated if we inject the event
5099 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5100 */
5101 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5102 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5103
5104 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5105 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5106 else
5107 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5108 }
5109
5110 /*
5111 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5112 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5113 */
5114 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5115 && !fIsNestedGuest)
5116 {
5117 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5118
5119 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5120 {
5121 /*
5122 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5123 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5124 */
5125 Assert(!DBGFIsStepping(pVCpu));
5126 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
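            /* Note: BS (single-step) is bit 14 of the guest pending-debug-exceptions VMCS field, which
               is what the shift below targets. */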
5127 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5128 AssertRC(rc);
5129 }
5130 else
5131 {
5132 /*
5133 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5134 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5135 * we take care of this case in vmxHCExportSharedDebugState and also of the case where
5136 * we use MTF, so just make sure it's called before executing guest code.
5137 */
5138 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5139 }
5140 }
5141 /* else: for nested-guests this is currently handled while merging VMCS controls. */
5142
5143 /*
5144 * Finally, update the guest-interruptibility state.
5145 *
5146 * This is required for the real-on-v86 software interrupt injection, for
5147 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5148 */
5149 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5150 AssertRC(rc);
5151
5152 /*
5153 * There's no need to clear the VM-entry interruption-information field here if we're not
5154 * injecting anything. VT-x clears the valid bit on every VM-exit.
5155 *
5156 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5157 */
5158
5159 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5160 return rcStrict;
5161}
5162
5163
5164/**
5165 * Tries to determine what part of the guest-state VT-x has deemed invalid
5166 * and updates the error record fields accordingly.
5167 *
5168 * @returns VMX_IGS_* error codes.
5169 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5170 * wrong with the guest state.
5171 *
5172 * @param pVCpu The cross context virtual CPU structure.
5173 * @param pVmcsInfo The VMCS info. object.
5174 *
5175 * @remarks This function assumes our cache of the VMCS controls
5176 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5177 */
5178static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5179{
5180#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5181#define HMVMX_CHECK_BREAK(expr, err) do { \
5182 if (!(expr)) { uError = (err); break; } \
5183 } while (0)
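    /* The guest-state checks below run inside a do { ... } while (0) block so that the macros above
       can 'break' out with the first failing VMX_IGS_* diagnostic code. */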
5184
5185 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5186 uint32_t uError = VMX_IGS_ERROR;
5187 uint32_t u32IntrState = 0;
5188#ifndef IN_NEM_DARWIN
5189 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5190 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5191#else
5192 bool const fUnrestrictedGuest = true;
5193#endif
5194 do
5195 {
5196 int rc;
5197
5198 /*
5199 * Guest-interruptibility state.
5200 *
5201 * Read this first so that a check which fails before the ones that actually
5202 * require the guest-interruptibility state still records the correct
5203 * VMCS value and does not cause further confusion.
5204 */
5205 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5206 AssertRC(rc);
5207
5208 uint32_t u32Val;
5209 uint64_t u64Val;
5210
5211 /*
5212 * CR0.
5213 */
5214 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5215 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5216 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
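        /* Note: bits that are 1 in both fixed MSRs (the AND) must be 1 in guest CR0, while bits that are
           0 in both (i.e. outside the OR) must be 0; see the VMX-fixed-bits appendices in the Intel spec. */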
5217 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5218 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5219 if (fUnrestrictedGuest)
5220 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5221
5222 uint64_t u64GuestCr0;
5223 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5224 AssertRC(rc);
5225 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5226 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5227 if ( !fUnrestrictedGuest
5228 && (u64GuestCr0 & X86_CR0_PG)
5229 && !(u64GuestCr0 & X86_CR0_PE))
5230 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5231
5232 /*
5233 * CR4.
5234 */
5235 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5236 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5237 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5238
5239 uint64_t u64GuestCr4;
5240 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5241 AssertRC(rc);
5242 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5243 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5244
5245 /*
5246 * IA32_DEBUGCTL MSR.
5247 */
5248 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5249 AssertRC(rc);
5250 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5251 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5252 {
5253 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5254 }
5255 uint64_t u64DebugCtlMsr = u64Val;
5256
5257#ifdef VBOX_STRICT
5258 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5259 AssertRC(rc);
5260 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5261#endif
5262 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5263
5264 /*
5265 * RIP and RFLAGS.
5266 */
5267 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5268 AssertRC(rc);
5269 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5270 if ( !fLongModeGuest
5271 || !pCtx->cs.Attr.n.u1Long)
5272 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5273 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5274 * must be identical if the "IA-32e mode guest" VM-entry
5275 * control is 1 and CS.L is 1. No check applies if the
5276 * CPU supports 64 linear-address bits. */
5277
5278 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5279 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5280 AssertRC(rc);
5281        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
5282 VMX_IGS_RFLAGS_RESERVED);
5283 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5284 uint32_t const u32Eflags = u64Val;
5285
5286 if ( fLongModeGuest
5287 || ( fUnrestrictedGuest
5288 && !(u64GuestCr0 & X86_CR0_PE)))
5289 {
5290 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5291 }
5292
5293 uint32_t u32EntryInfo;
5294 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5295 AssertRC(rc);
5296 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5297 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5298
5299 /*
5300 * 64-bit checks.
5301 */
5302 if (fLongModeGuest)
5303 {
5304 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5305 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5306 }
5307
5308 if ( !fLongModeGuest
5309 && (u64GuestCr4 & X86_CR4_PCIDE))
5310 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5311
5312 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5313 * 51:32 beyond the processor's physical-address width are 0. */
5314
5315 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5316 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5317 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5318
5319#ifndef IN_NEM_DARWIN
5320 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5321 AssertRC(rc);
5322 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5323
5324 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5325 AssertRC(rc);
5326 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5327#endif
5328
5329 /*
5330 * PERF_GLOBAL MSR.
5331 */
5332 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5333 {
5334 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5335 AssertRC(rc);
5336 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5337 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5338 }
5339
5340 /*
5341 * PAT MSR.
5342 */
5343 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5344 {
5345 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5346 AssertRC(rc);
5347 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5348 for (unsigned i = 0; i < 8; i++)
5349 {
5350 uint8_t u8Val = (u64Val & 0xff);
5351 if ( u8Val != 0 /* UC */
5352 && u8Val != 1 /* WC */
5353 && u8Val != 4 /* WT */
5354 && u8Val != 5 /* WP */
5355 && u8Val != 6 /* WB */
5356 && u8Val != 7 /* UC- */)
5357 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5358 u64Val >>= 8;
5359 }
5360 }
5361
5362 /*
5363 * EFER MSR.
5364 */
5365 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5366 {
5367 Assert(g_fHmVmxSupportsVmcsEfer);
5368 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5369 AssertRC(rc);
5370 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5371 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5372 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5373 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5374 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5375 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5376 * iemVmxVmentryCheckGuestState(). */
5377 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5378 || !(u64GuestCr0 & X86_CR0_PG)
5379 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5380 VMX_IGS_EFER_LMA_LME_MISMATCH);
5381 }
5382
5383 /*
5384 * Segment registers.
5385 */
5386 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5387 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5388 if (!(u32Eflags & X86_EFL_VM))
5389 {
5390 /* CS */
5391 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5392 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5393 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5394 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5395 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5396 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5397 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5398 /* CS cannot be loaded with NULL in protected mode. */
5399 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5400 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5401 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5402 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5403 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5404 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5405 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5406 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5407 else
5408 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5409
5410 /* SS */
5411 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5412 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5413 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5414 if ( !(pCtx->cr0 & X86_CR0_PE)
5415 || pCtx->cs.Attr.n.u4Type == 3)
5416 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5417
5418 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5419 {
5420 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5421 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5422 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5423 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5424 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5425 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5426 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5427 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5428 }
5429
5430 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5431 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5432 {
5433 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5434 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5435 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5436 || pCtx->ds.Attr.n.u4Type > 11
5437 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5438 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5439 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5440 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5441 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5442 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5443 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5444 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5445 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5446 }
5447 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5448 {
5449 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5450 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5451 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5452 || pCtx->es.Attr.n.u4Type > 11
5453 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5454 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5455 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5456 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5457 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5458 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5459 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5460 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5461 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5462 }
5463 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5464 {
5465 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5466 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5467 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5468 || pCtx->fs.Attr.n.u4Type > 11
5469 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5470 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5471 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5472 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5473 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5474 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5475 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5476 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5477 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5478 }
5479 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5480 {
5481 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5482 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5483 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5484 || pCtx->gs.Attr.n.u4Type > 11
5485 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5486 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5487 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5488 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5489 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5490 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5491 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5492 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5493 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5494 }
5495 /* 64-bit capable CPUs. */
5496 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5497 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5498 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5499 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5500 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5501 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5502 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5503 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5504 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5505 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5506 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5507 }
5508 else
5509 {
5510 /* V86 mode checks. */
5511 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5512 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5513 {
5514 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5515 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5516 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5517 }
5518 else
5519 {
5520 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5521 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5522 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5523 }
5524
5525 /* CS */
5526 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5527 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5528 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5529 /* SS */
5530 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5531 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5532 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5533 /* DS */
5534 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5535 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5536 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5537 /* ES */
5538 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5539 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5540 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5541 /* FS */
5542 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5543 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5544 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5545 /* GS */
5546 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5547 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5548 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5549 /* 64-bit capable CPUs. */
5550 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5551 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5552 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5553 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5554 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5555 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5556 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5557 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5558 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5559 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5560 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5561 }
5562
5563 /*
5564 * TR.
5565 */
5566 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5567 /* 64-bit capable CPUs. */
5568 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5569 if (fLongModeGuest)
5570 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5571 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5572 else
5573 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5574 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5575 VMX_IGS_TR_ATTR_TYPE_INVALID);
5576 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5577 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5578 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5579 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5580 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5581 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5582 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5583 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5584
5585 /*
5586 * GDTR and IDTR (64-bit capable checks).
5587 */
5588 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5589 AssertRC(rc);
5590 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5591
5592 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5593 AssertRC(rc);
5594 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5595
5596 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5597 AssertRC(rc);
5598 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5599
5600 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5601 AssertRC(rc);
5602 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5603
5604 /*
5605 * Guest Non-Register State.
5606 */
5607 /* Activity State. */
5608 uint32_t u32ActivityState;
5609 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5610 AssertRC(rc);
5611 HMVMX_CHECK_BREAK( !u32ActivityState
5612 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5613 VMX_IGS_ACTIVITY_STATE_INVALID);
5614 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5615 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5616
5617 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5618 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5619 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5620
5621 /** @todo Activity state and injecting interrupts. Left as a todo since we
5622 * currently don't use any activity state other than ACTIVE. */
5623
5624 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5625 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5626
5627 /* Guest interruptibility-state. */
5628 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5629 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5630 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5631 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5632 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5633 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5634 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5635 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5636 {
5637 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5638 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5639 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5640 }
5641 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5642 {
5643 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5644 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5645 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5646 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5647 }
5648 /** @todo Assumes the processor is not in SMM. */
5649 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5650 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5651 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5652 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5653 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5654 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5655 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5656 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5657
5658 /* Pending debug exceptions. */
5659 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5660 AssertRC(rc);
5661 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5662 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5663 u32Val = u64Val; /* For pending debug exceptions checks below. */
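        /* If the guest is single-stepping (EFLAGS.TF set, IA32_DEBUGCTL.BTF clear) while in an STI/MOV-SS
           shadow or in the HLT activity state, the BS bit must be pending; otherwise it must be clear.
           The two checks below verify exactly that. */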
5664
5665 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5666 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5667 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5668 {
5669 if ( (u32Eflags & X86_EFL_TF)
5670 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5671 {
5672 /* Bit 14 is PendingDebug.BS. */
5673 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5674 }
5675 if ( !(u32Eflags & X86_EFL_TF)
5676 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5677 {
5678 /* Bit 14 is PendingDebug.BS. */
5679 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5680 }
5681 }
5682
5683#ifndef IN_NEM_DARWIN
5684 /* VMCS link pointer. */
5685 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5686 AssertRC(rc);
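        /* A VMCS link pointer of all ones means "not in use"; the shadow VMCS checks below only apply
           when a real link pointer has been set up. */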
5687 if (u64Val != UINT64_C(0xffffffffffffffff))
5688 {
5689 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5690 /** @todo Bits beyond the processor's physical-address width MBZ. */
5691 /** @todo SMM checks. */
5692 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5693 Assert(pVmcsInfo->pvShadowVmcs);
5694 VMXVMCSREVID VmcsRevId;
5695 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5696 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5697 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5698 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5699 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5700 }
5701
5702 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5703 * not using nested paging? */
5704 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5705 && !fLongModeGuest
5706 && CPUMIsGuestInPAEModeEx(pCtx))
5707 {
5708 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5709 AssertRC(rc);
5710 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5711
5712 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5713 AssertRC(rc);
5714 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5715
5716 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5717 AssertRC(rc);
5718 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5719
5720 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5721 AssertRC(rc);
5722 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5723 }
5724#endif
5725
5726 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5727 if (uError == VMX_IGS_ERROR)
5728 uError = VMX_IGS_REASON_NOT_FOUND;
5729 } while (0);
5730
5731 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5732 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5733 return uError;
5734
5735#undef HMVMX_ERROR_BREAK
5736#undef HMVMX_CHECK_BREAK
5737}
5738
5739
5740#ifndef HMVMX_USE_FUNCTION_TABLE
5741/**
5742 * Handles a guest VM-exit from hardware-assisted VMX execution.
5743 *
5744 * @returns Strict VBox status code (i.e. informational status codes too).
5745 * @param pVCpu The cross context virtual CPU structure.
5746 * @param pVmxTransient The VMX-transient structure.
5747 */
5748DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5749{
5750#ifdef DEBUG_ramshankar
5751# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5752 do { \
5753 if (a_fSave != 0) \
5754 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5755 VBOXSTRICTRC rcStrict = a_CallExpr; \
5756 if (a_fSave != 0) \
5757 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5758 return rcStrict; \
5759 } while (0)
5760#else
5761# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5762#endif
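    /* Note: in regular builds VMEXIT_CALL_RET() simply tail-calls the given handler.  In the
       DEBUG_ramshankar build it additionally imports the complete guest state before the call
       and marks all of it as changed afterwards whenever a_fSave is non-zero (the dispatcher
       below currently passes 0 everywhere). */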
5763 uint32_t const uExitReason = pVmxTransient->uExitReason;
5764 switch (uExitReason)
5765 {
5766 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5767 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5768 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5769 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5770 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5771 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5772 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5773 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5774 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5775 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5776 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5777 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5778 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5779 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5780 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5781 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5782 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5783 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5784 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5785 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5786 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5787 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5788 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5789 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5790 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5791 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5792 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5793 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5794 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5795 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5796#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5797 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5798 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5799 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5800 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5801 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5802        case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5803        case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5804 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5805 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5806 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5807#else
5808 case VMX_EXIT_VMCLEAR:
5809 case VMX_EXIT_VMLAUNCH:
5810 case VMX_EXIT_VMPTRLD:
5811 case VMX_EXIT_VMPTRST:
5812 case VMX_EXIT_VMREAD:
5813 case VMX_EXIT_VMRESUME:
5814 case VMX_EXIT_VMWRITE:
5815 case VMX_EXIT_VMXOFF:
5816 case VMX_EXIT_VMXON:
5817 case VMX_EXIT_INVVPID:
5818 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5819#endif
5820#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5821 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5822#else
5823 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5824#endif
5825
5826 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5827 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5828 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5829
5830 case VMX_EXIT_INIT_SIGNAL:
5831 case VMX_EXIT_SIPI:
5832 case VMX_EXIT_IO_SMI:
5833 case VMX_EXIT_SMI:
5834 case VMX_EXIT_ERR_MSR_LOAD:
5835 case VMX_EXIT_ERR_MACHINE_CHECK:
5836 case VMX_EXIT_PML_FULL:
5837 case VMX_EXIT_VIRTUALIZED_EOI:
5838 case VMX_EXIT_GDTR_IDTR_ACCESS:
5839 case VMX_EXIT_LDTR_TR_ACCESS:
5840 case VMX_EXIT_APIC_WRITE:
5841 case VMX_EXIT_RDRAND:
5842 case VMX_EXIT_RSM:
5843 case VMX_EXIT_VMFUNC:
5844 case VMX_EXIT_ENCLS:
5845 case VMX_EXIT_RDSEED:
5846 case VMX_EXIT_XSAVES:
5847 case VMX_EXIT_XRSTORS:
5848 case VMX_EXIT_UMWAIT:
5849 case VMX_EXIT_TPAUSE:
5850 case VMX_EXIT_LOADIWKEY:
5851 default:
5852 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5853 }
5854#undef VMEXIT_CALL_RET
5855}
5856#endif /* !HMVMX_USE_FUNCTION_TABLE */
5857
5858
5859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5860/**
5861 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5862 *
5863 * @returns Strict VBox status code (i.e. informational status codes too).
5864 * @param pVCpu The cross context virtual CPU structure.
5865 * @param pVmxTransient The VMX-transient structure.
5866 */
5867DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5868{
5869 uint32_t const uExitReason = pVmxTransient->uExitReason;
5870 switch (uExitReason)
5871 {
5872# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5873 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5874 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5875# else
5876 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5877 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5878# endif
5879 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5880 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5881 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5882
5883 /*
5884 * We shouldn't direct host physical interrupts to the nested-guest.
5885 */
5886 case VMX_EXIT_EXT_INT:
5887 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5888
5889 /*
5890         * Instructions that cause VM-exits unconditionally or whose exit condition is
5891         * determined solely by the nested hypervisor (meaning that if the VM-exit
5892         * happens, it is guaranteed to be a nested-guest VM-exit).
5893 *
5894 * - Provides VM-exit instruction length ONLY.
5895 */
5896 case VMX_EXIT_CPUID: /* Unconditional. */
5897 case VMX_EXIT_VMCALL:
5898 case VMX_EXIT_GETSEC:
5899 case VMX_EXIT_INVD:
5900 case VMX_EXIT_XSETBV:
5901 case VMX_EXIT_VMLAUNCH:
5902 case VMX_EXIT_VMRESUME:
5903 case VMX_EXIT_VMXOFF:
5904 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5905 case VMX_EXIT_VMFUNC:
5906 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5907
5908 /*
5909         * Instructions that cause VM-exits unconditionally or whose exit condition is
5910         * determined solely by the nested hypervisor (meaning that if the VM-exit
5911         * happens, it is guaranteed to be a nested-guest VM-exit).
5912 *
5913 * - Provides VM-exit instruction length.
5914 * - Provides VM-exit information.
5915 * - Optionally provides Exit qualification.
5916 *
5917 * Since Exit qualification is 0 for all VM-exits where it is not
5918 * applicable, reading and passing it to the guest should produce
5919 * defined behavior.
5920 *
5921 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5922 */
5923 case VMX_EXIT_INVEPT: /* Unconditional. */
5924 case VMX_EXIT_INVVPID:
5925 case VMX_EXIT_VMCLEAR:
5926 case VMX_EXIT_VMPTRLD:
5927 case VMX_EXIT_VMPTRST:
5928 case VMX_EXIT_VMXON:
5929 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5930 case VMX_EXIT_LDTR_TR_ACCESS:
5931 case VMX_EXIT_RDRAND:
5932 case VMX_EXIT_RDSEED:
5933 case VMX_EXIT_XSAVES:
5934 case VMX_EXIT_XRSTORS:
5935 case VMX_EXIT_UMWAIT:
5936 case VMX_EXIT_TPAUSE:
5937 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5938
5939 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5940 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5941 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5942 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5943 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5944 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5945 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5946 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5947 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5948 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5949 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5950 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5951 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5952 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5953 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5954 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5955 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5956 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5957 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5958
5959 case VMX_EXIT_PREEMPT_TIMER:
5960 {
5961 /** @todo NSTVMX: Preempt timer. */
5962 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5963 }
5964
5965 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5966 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5967
5968 case VMX_EXIT_VMREAD:
5969 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5970
5971 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5972 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5973
5974 case VMX_EXIT_INIT_SIGNAL:
5975 case VMX_EXIT_SIPI:
5976 case VMX_EXIT_IO_SMI:
5977 case VMX_EXIT_SMI:
5978 case VMX_EXIT_ERR_MSR_LOAD:
5979 case VMX_EXIT_ERR_MACHINE_CHECK:
5980 case VMX_EXIT_PML_FULL:
5981 case VMX_EXIT_RSM:
5982 default:
5983 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5984 }
5985}
5986#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5987
5988
5989/** @name VM-exit helpers.
5990 * @{
5991 */
5992/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5993/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5994/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5995
5996/** Macro for VM-exits called unexpectedly. */
5997#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5998 do { \
5999 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6000 return VERR_VMX_UNEXPECTED_EXIT; \
6001 } while (0)
6002
6003#ifdef VBOX_STRICT
6004# ifndef IN_NEM_DARWIN
6005/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6006# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6007 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6008
6009# define HMVMX_ASSERT_PREEMPT_CPUID() \
6010 do { \
6011 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6012 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6013 } while (0)
6014
6015# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6016 do { \
6017 AssertPtr((a_pVCpu)); \
6018 AssertPtr((a_pVmxTransient)); \
6019 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6020 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6021 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6022 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6023 Assert((a_pVmxTransient)->pVmcsInfo); \
6024 Assert(ASMIntAreEnabled()); \
6025 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6026 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6027 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6028 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6029 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6030 HMVMX_ASSERT_PREEMPT_CPUID(); \
6031 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6032 } while (0)
6033# else
6034# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6035# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6036# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6037 do { \
6038 AssertPtr((a_pVCpu)); \
6039 AssertPtr((a_pVmxTransient)); \
6040 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6041 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6042 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6043 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6044 Assert((a_pVmxTransient)->pVmcsInfo); \
6045 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6046 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6047 } while (0)
6048# endif
6049
6050# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6051 do { \
6052 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6053 Assert((a_pVmxTransient)->fIsNestedGuest); \
6054 } while (0)
6055
6056# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6057 do { \
6058 Log4Func(("\n")); \
6059 } while (0)
6060#else
6061# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6062 do { \
6063 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6064 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6065 } while (0)
6066
6067# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6068 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6069
6070# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6071#endif
6072
6073#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6074/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
6075 * guests that attempted to execute a VMX instruction. */
6076# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6077 do \
6078 { \
6079 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6080 if (rcStrictTmp == VINF_SUCCESS) \
6081 { /* likely */ } \
6082 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6083 { \
6084 Assert((a_pVCpu)->hm.s.Event.fPending); \
6085 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6086 return VINF_SUCCESS; \
6087 } \
6088 else \
6089 { \
6090 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6091 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6092 } \
6093 } while (0)
6094
6095/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6096# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6097 do \
6098 { \
6099 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6100 (a_pGCPtrEffAddr)); \
6101 if (rcStrictTmp == VINF_SUCCESS) \
6102 { /* likely */ } \
6103 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6104 { \
6105 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6106 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6107 NOREF(uXcptTmp); \
6108 return VINF_SUCCESS; \
6109 } \
6110 else \
6111 { \
6112 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6113 return rcStrictTmp; \
6114 } \
6115 } while (0)
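/* Illustrative sketch of how a VMX-instruction exit handler might use the macro above (the
   transient-structure field holding the instruction information, ExitInstrInfo, is assumed
   here for the example):
       RTGCPTR GCPtrVmcs;
       HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                                VMXMEMACCESS_READ, &GCPtrVmcs);
   On VINF_HM_PENDING_XCPT the macro has already queued the exception and made the handler
   return VINF_SUCCESS. */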
6116#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6117
6118
6119/**
6120 * Advances the guest RIP by the specified number of bytes.
6121 *
6122 * @param pVCpu The cross context virtual CPU structure.
6123 * @param cbInstr Number of bytes to advance the RIP by.
6124 *
6125 * @remarks No-long-jump zone!!!
6126 */
6127DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6128{
6129 /* Advance the RIP. */
6130 pVCpu->cpum.GstCtx.rip += cbInstr;
6131 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
6132
6133 /* Update interrupt inhibition. */
6134 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6135 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
6136 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6137}
6138
6139
6140/**
6141 * Advances the guest RIP after reading it from the VMCS.
6142 *
6143 * @returns VBox status code, no informational status codes.
6144 * @param pVCpu The cross context virtual CPU structure.
6145 * @param pVmxTransient The VMX-transient structure.
6146 *
6147 * @remarks No-long-jump zone!!!
6148 */
6149static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6150{
6151 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6152 /** @todo consider template here after checking callers. */
6153 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6154 AssertRCReturn(rc, rc);
6155
6156 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6157 return VINF_SUCCESS;
6158}
6159
6160
6161/**
6162 * Handles a condition that occurred while delivering an event through the guest or
6163 * nested-guest IDT.
6164 *
6165 * @returns Strict VBox status code (i.e. informational status codes too).
6166 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6167 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6168 *         to continue execution of the guest, which will deliver the \#DF.
6169 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6170 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6171 *
6172 * @param pVCpu The cross context virtual CPU structure.
6173 * @param pVmxTransient The VMX-transient structure.
6174 *
6175 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6176 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6177 * is due to an EPT violation, PML full or SPP-related event.
6178 *
6179 * @remarks No-long-jump zone!!!
6180 */
6181static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6182{
6183 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6184 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6185 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6186 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6187 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6188 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6189
6190 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6191 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6192 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6193 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6194 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6195 {
6196 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6197 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6198
6199 /*
6200 * If the event was a software interrupt (generated with INT n) or a software exception
6201 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6202         * can handle the VM-exit and continue guest execution, which will re-execute the
6203         * instruction rather than re-inject the exception.  Re-injecting can cause premature
6204         * trips to ring-3 before injection and involves TRPM, which currently has no way of
6205         * recording that these exceptions were caused by these instructions (ICEBP's #DB poses
6206         * the problem).
6207 */
6208 IEMXCPTRAISE enmRaise;
6209 IEMXCPTRAISEINFO fRaiseInfo;
6210 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6211 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6212 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6213 {
6214 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6215 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6216 }
6217 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6218 {
6219 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6220 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6221 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6222
6223 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6224 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6225
6226 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6227
6228 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6229 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6230 {
6231 pVmxTransient->fVectoringPF = true;
6232 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6233 }
6234 }
6235 else
6236 {
6237 /*
6238 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6239 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6240 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6241 */
6242 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6243 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6244 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6245 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6246 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6247 }
6248
6249 /*
6250 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6251 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6252 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6253 * subsequent VM-entry would fail, see @bugref{7445}.
6254 *
6255 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6256 */
6257 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6258 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6259 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6260 && CPUMIsGuestNmiBlocking(pVCpu))
6261 {
6262 CPUMSetGuestNmiBlocking(pVCpu, false);
6263 }
6264
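        /* Illustrative outcomes of the evaluation above (standard x86 exception classing):
           a software interrupt/exception leads to re-executing the instruction, a #PF raised
           while delivering another #PF is promoted to a #DF, and a further fault while
           delivering a #DF escalates to a triple fault (VM reset). */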
6265 switch (enmRaise)
6266 {
6267 case IEMXCPTRAISE_CURRENT_XCPT:
6268 {
6269 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6270 Assert(rcStrict == VINF_SUCCESS);
6271 break;
6272 }
6273
6274 case IEMXCPTRAISE_PREV_EVENT:
6275 {
6276 uint32_t u32ErrCode;
6277 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6278 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6279 else
6280 u32ErrCode = 0;
6281
6282 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6283 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6284 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6285 pVCpu->cpum.GstCtx.cr2);
6286
6287 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6288 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6289 Assert(rcStrict == VINF_SUCCESS);
6290 break;
6291 }
6292
6293 case IEMXCPTRAISE_REEXEC_INSTR:
6294 Assert(rcStrict == VINF_SUCCESS);
6295 break;
6296
6297 case IEMXCPTRAISE_DOUBLE_FAULT:
6298 {
6299 /*
6300 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6301                 * second #PF as a guest #PF (and not a shadow #PF), in which case it needs to be converted into a #DF.
6302 */
6303 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6304 {
6305 pVmxTransient->fVectoringDoublePF = true;
6306 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6307 pVCpu->cpum.GstCtx.cr2));
6308 rcStrict = VINF_SUCCESS;
6309 }
6310 else
6311 {
6312 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6313 vmxHCSetPendingXcptDF(pVCpu);
6314 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6315 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6316 rcStrict = VINF_HM_DOUBLE_FAULT;
6317 }
6318 break;
6319 }
6320
6321 case IEMXCPTRAISE_TRIPLE_FAULT:
6322 {
6323 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6324 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6325 rcStrict = VINF_EM_RESET;
6326 break;
6327 }
6328
6329 case IEMXCPTRAISE_CPU_HANG:
6330 {
6331 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6332 rcStrict = VERR_EM_GUEST_CPU_HANG;
6333 break;
6334 }
6335
6336 default:
6337 {
6338 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6339 rcStrict = VERR_VMX_IPE_2;
6340 break;
6341 }
6342 }
6343 }
6344 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6345 && !CPUMIsGuestNmiBlocking(pVCpu))
6346 {
6347 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6348 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6349 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6350 {
6351 /*
6352             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6353 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6354 * that virtual NMIs remain blocked until the IRET execution is completed.
6355 *
6356 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6357 */
6358 CPUMSetGuestNmiBlocking(pVCpu, true);
6359 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6360 }
6361 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6362 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6363 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6364 {
6365 /*
6366 * Execution of IRET caused an EPT violation, page-modification log-full event or
6367 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6368 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6369 * that virtual NMIs remain blocked until the IRET execution is completed.
6370 *
6371 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6372 */
6373 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6374 {
6375 CPUMSetGuestNmiBlocking(pVCpu, true);
6376 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6377 }
6378 }
6379 }
6380
6381 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6382 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6383 return rcStrict;
6384}
6385
6386
6387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6388/**
6389 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
6390 * guest attempting to execute a VMX instruction.
6391 *
6392 * @returns Strict VBox status code (i.e. informational status codes too).
6393 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6394 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6395 *
6396 * @param pVCpu The cross context virtual CPU structure.
6397 * @param uExitReason The VM-exit reason.
6398 *
6399 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6400 * @remarks No-long-jump zone!!!
6401 */
6402static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6403{
6404 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6405 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6406
6407 /*
6408 * The physical CPU would have already checked the CPU mode/code segment.
6409 * We shall just assert here for paranoia.
6410 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6411 */
6412 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6413 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6414 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6415
6416 if (uExitReason == VMX_EXIT_VMXON)
6417 {
6418 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6419
6420 /*
6421 * We check CR4.VMXE because it is required to be always set while in VMX operation
6422 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6423 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6424 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6425 */
6426 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6427 {
6428 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6429 vmxHCSetPendingXcptUD(pVCpu);
6430 return VINF_HM_PENDING_XCPT;
6431 }
6432 }
6433 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6434 {
6435 /*
6436 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6437 * (other than VMXON), we need to raise a #UD.
6438 */
6439 Log4Func(("Not in VMX root mode -> #UD\n"));
6440 vmxHCSetPendingXcptUD(pVCpu);
6441 return VINF_HM_PENDING_XCPT;
6442 }
6443
6444 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6445 return VINF_SUCCESS;
6446}
6447
6448
6449/**
6450 * Decodes the memory operand of an instruction that caused a VM-exit.
6451 *
6452 * The Exit qualification field provides the displacement field for memory
6453 * operand instructions, if any.
6454 *
6455 * @returns Strict VBox status code (i.e. informational status codes too).
6456 * @retval VINF_SUCCESS if the operand was successfully decoded.
6457 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6458 * operand.
6459 * @param pVCpu The cross context virtual CPU structure.
6460 * @param uExitInstrInfo The VM-exit instruction information field.
6461 * @param enmMemAccess The memory operand's access type (read or write).
6462 * @param GCPtrDisp The instruction displacement field, if any. For
6463 * RIP-relative addressing pass RIP + displacement here.
6464 * @param pGCPtrMem Where to store the effective destination memory address.
6465 *
6466 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6467 *          virtual-8086 mode and hence skips those checks when verifying that the
6468 *          segment is valid.
6469 */
6470static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6471 PRTGCPTR pGCPtrMem)
6472{
6473 Assert(pGCPtrMem);
6474 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6475 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6476 | CPUMCTX_EXTRN_CR0);
6477
6478 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6479 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6480 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6481
6482 VMXEXITINSTRINFO ExitInstrInfo;
6483 ExitInstrInfo.u = uExitInstrInfo;
6484 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6485 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6486 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6487 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6488 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6489 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6490 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6491 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6492 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6493
6494 /*
6495 * Validate instruction information.
6496     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6497 */
6498 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6499 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6500 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6501 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6502 AssertLogRelMsgReturn(fIsMemOperand,
6503 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6504
6505 /*
6506 * Compute the complete effective address.
6507 *
6508 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6509 * See AMD spec. 4.5.2 "Segment Registers".
6510 */
6511 RTGCPTR GCPtrMem = GCPtrDisp;
6512 if (fBaseRegValid)
6513 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6514 if (fIdxRegValid)
6515 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6516
6517 RTGCPTR const GCPtrOff = GCPtrMem;
6518 if ( !fIsLongMode
6519 || iSegReg >= X86_SREG_FS)
6520 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6521 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
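    /* Illustrative example: for a hypothetical memory operand [rbx + rsi*8 + 0x10] in 64-bit
       mode, GCPtrDisp carries the displacement (plus RIP for RIP-relative forms), RBX and
       (RSI << 3) are added above, the segment base is added only for FS/GS while in long mode
       (and always outside long mode), and the result is finally masked to the address size. */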
6522
6523 /*
6524 * Validate effective address.
6525 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6526 */
6527 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6528 Assert(cbAccess > 0);
6529 if (fIsLongMode)
6530 {
6531 if (X86_IS_CANONICAL(GCPtrMem))
6532 {
6533 *pGCPtrMem = GCPtrMem;
6534 return VINF_SUCCESS;
6535 }
6536
6537 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6538 * "Data Limit Checks in 64-bit Mode". */
6539 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6540 vmxHCSetPendingXcptGP(pVCpu, 0);
6541 return VINF_HM_PENDING_XCPT;
6542 }
6543
6544 /*
6545 * This is a watered down version of iemMemApplySegment().
6546 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6547 * and segment CPL/DPL checks are skipped.
6548 */
6549 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6550 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6551 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6552
6553 /* Check if the segment is present and usable. */
6554 if ( pSel->Attr.n.u1Present
6555 && !pSel->Attr.n.u1Unusable)
6556 {
6557 Assert(pSel->Attr.n.u1DescType);
6558 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6559 {
6560 /* Check permissions for the data segment. */
6561 if ( enmMemAccess == VMXMEMACCESS_WRITE
6562 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6563 {
6564 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6565 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6566 return VINF_HM_PENDING_XCPT;
6567 }
6568
6569 /* Check limits if it's a normal data segment. */
6570 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6571 {
6572 if ( GCPtrFirst32 > pSel->u32Limit
6573 || GCPtrLast32 > pSel->u32Limit)
6574 {
6575 Log4Func(("Data segment limit exceeded. "
6576 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6577 GCPtrLast32, pSel->u32Limit));
6578 if (iSegReg == X86_SREG_SS)
6579 vmxHCSetPendingXcptSS(pVCpu, 0);
6580 else
6581 vmxHCSetPendingXcptGP(pVCpu, 0);
6582 return VINF_HM_PENDING_XCPT;
6583 }
6584 }
6585 else
6586 {
6587 /* Check limits if it's an expand-down data segment.
6588 Note! The upper boundary is defined by the B bit, not the G bit! */
6589 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6590 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6591 {
6592 Log4Func(("Expand-down data segment limit exceeded. "
6593 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6594 GCPtrLast32, pSel->u32Limit));
6595 if (iSegReg == X86_SREG_SS)
6596 vmxHCSetPendingXcptSS(pVCpu, 0);
6597 else
6598 vmxHCSetPendingXcptGP(pVCpu, 0);
6599 return VINF_HM_PENDING_XCPT;
6600 }
6601 }
6602 }
6603 else
6604 {
6605 /* Check permissions for the code segment. */
6606 if ( enmMemAccess == VMXMEMACCESS_WRITE
6607 || ( enmMemAccess == VMXMEMACCESS_READ
6608 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6609 {
6610 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6611 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6612 vmxHCSetPendingXcptGP(pVCpu, 0);
6613 return VINF_HM_PENDING_XCPT;
6614 }
6615
6616 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6617 if ( GCPtrFirst32 > pSel->u32Limit
6618 || GCPtrLast32 > pSel->u32Limit)
6619 {
6620 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6621 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6622 if (iSegReg == X86_SREG_SS)
6623 vmxHCSetPendingXcptSS(pVCpu, 0);
6624 else
6625 vmxHCSetPendingXcptGP(pVCpu, 0);
6626 return VINF_HM_PENDING_XCPT;
6627 }
6628 }
6629 }
6630 else
6631 {
6632 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6633 vmxHCSetPendingXcptGP(pVCpu, 0);
6634 return VINF_HM_PENDING_XCPT;
6635 }
6636
6637 *pGCPtrMem = GCPtrMem;
6638 return VINF_SUCCESS;
6639}
6640#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6641
6642
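/*
 * Note: the VM-exit helpers below share a common pattern: import the guest state IEM needs,
 * hand the already-decoded instruction to IEM, mark the registers IEM may have modified as
 * changed, and fold VINF_IEM_RAISED_XCPT back into VINF_SUCCESS since the pending exception
 * will be injected on the next VM-entry.
 */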
6643/**
6644 * VM-exit helper for LMSW.
6645 */
6646static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6647{
6648 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6649 AssertRCReturn(rc, rc);
6650
6651 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6652 AssertMsg( rcStrict == VINF_SUCCESS
6653 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6654
6655 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6656 if (rcStrict == VINF_IEM_RAISED_XCPT)
6657 {
6658 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6659 rcStrict = VINF_SUCCESS;
6660 }
6661
6662 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6663 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6664 return rcStrict;
6665}
6666
6667
6668/**
6669 * VM-exit helper for CLTS.
6670 */
6671static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6672{
6673 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6674 AssertRCReturn(rc, rc);
6675
6676 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6677 AssertMsg( rcStrict == VINF_SUCCESS
6678 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6679
6680 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6681 if (rcStrict == VINF_IEM_RAISED_XCPT)
6682 {
6683 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6684 rcStrict = VINF_SUCCESS;
6685 }
6686
6687 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6688 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6689 return rcStrict;
6690}
6691
6692
6693/**
6694 * VM-exit helper for MOV from CRx (CRx read).
6695 */
6696static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6697{
6698 Assert(iCrReg < 16);
6699 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6700
6701 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6702 AssertRCReturn(rc, rc);
6703
6704 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6705 AssertMsg( rcStrict == VINF_SUCCESS
6706 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6707
6708 if (iGReg == X86_GREG_xSP)
6709 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6710 else
6711 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6712#ifdef VBOX_WITH_STATISTICS
6713 switch (iCrReg)
6714 {
6715 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6716 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6717 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6718 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6719 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6720 }
6721#endif
6722 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6723 return rcStrict;
6724}
6725
6726
6727/**
6728 * VM-exit helper for MOV to CRx (CRx write).
6729 */
6730static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6731{
6732 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6733
6734 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6735 AssertMsg( rcStrict == VINF_SUCCESS
6736 || rcStrict == VINF_IEM_RAISED_XCPT
6737 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6738
6739 switch (iCrReg)
6740 {
6741 case 0:
6742 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6743 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6744 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6745 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6746 break;
6747
6748 case 2:
6749 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6750            /* Nothing to do here; CR2 is not part of the VMCS. */
6751 break;
6752
6753 case 3:
6754 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6755 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6756 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6757 break;
6758
6759 case 4:
6760 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6761 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6762#ifndef IN_NEM_DARWIN
6763 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6764 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6765#else
6766 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6767#endif
6768 break;
6769
6770 case 8:
6771 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6772 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6773 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6774 break;
6775
6776 default:
6777 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6778 break;
6779 }
6780
6781 if (rcStrict == VINF_IEM_RAISED_XCPT)
6782 {
6783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6784 rcStrict = VINF_SUCCESS;
6785 }
6786 return rcStrict;
6787}
6788
6789
6790/**
6791 * VM-exit exception handler for \#PF (Page-fault exception).
6792 *
6793 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6794 */
6795static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6796{
6797 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6798 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6799
6800#ifndef IN_NEM_DARWIN
6801 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6802 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6803 { /* likely */ }
6804 else
6805#endif
6806 {
6807#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6808 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6809#endif
6810 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6811 if (!pVmxTransient->fVectoringDoublePF)
6812 {
6813 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6814 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6815 }
6816 else
6817 {
6818 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6819 Assert(!pVmxTransient->fIsNestedGuest);
6820 vmxHCSetPendingXcptDF(pVCpu);
6821 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6822 }
6823 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6824 return VINF_SUCCESS;
6825 }
6826
6827 Assert(!pVmxTransient->fIsNestedGuest);
6828
6829    /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
6830       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6831 if (pVmxTransient->fVectoringPF)
6832 {
6833 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6834 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6835 }
6836
6837 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6838 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6839 AssertRCReturn(rc, rc);
6840
6841 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6842 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6843
6844 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6845 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6846
6847 Log4Func(("#PF: rc=%Rrc\n", rc));
6848 if (rc == VINF_SUCCESS)
6849 {
6850 /*
6851         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6852 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6853 */
6854 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6855 TRPMResetTrap(pVCpu);
6856 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6857 return rc;
6858 }
6859
6860 if (rc == VINF_EM_RAW_GUEST_TRAP)
6861 {
6862 if (!pVmxTransient->fVectoringDoublePF)
6863 {
6864 /* It's a guest page fault and needs to be reflected to the guest. */
6865 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6866 TRPMResetTrap(pVCpu);
6867 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6868 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6869 uGstErrorCode, pVmxTransient->uExitQual);
6870 }
6871 else
6872 {
6873 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6874 TRPMResetTrap(pVCpu);
6875 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6876 vmxHCSetPendingXcptDF(pVCpu);
6877 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6878 }
6879
6880 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6881 return VINF_SUCCESS;
6882 }
6883
6884 TRPMResetTrap(pVCpu);
6885 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6886 return rc;
6887}
6888
6889
6890/**
6891 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6892 *
6893 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6894 */
6895static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6896{
6897 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6898 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6899
6900 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6901 AssertRCReturn(rc, rc);
6902
6903 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6904 {
6905 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6906 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6907
6908 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6909         *        provides VM-exit instruction length. If this causes problems later,
6910 * disassemble the instruction like it's done on AMD-V. */
6911 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6912 AssertRCReturn(rc2, rc2);
6913 return rc;
6914 }
6915
6916 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6917 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6918 return VINF_SUCCESS;
6919}
6920
6921
6922/**
6923 * VM-exit exception handler for \#BP (Breakpoint exception).
6924 *
6925 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6926 */
6927static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6928{
6929 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6930 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6931
6932 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6933 AssertRCReturn(rc, rc);
6934
6935 VBOXSTRICTRC rcStrict;
6936 if (!pVmxTransient->fIsNestedGuest)
6937 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6938 else
6939 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6940
6941 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6942 {
6943 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6944 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6945 rcStrict = VINF_SUCCESS;
6946 }
6947
6948 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6949 return rcStrict;
6950}
6951
6952
6953/**
6954 * VM-exit exception handler for \#AC (Alignment-check exception).
6955 *
6956 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6957 */
6958static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6959{
6960 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6961
6962 /*
6963     * Detect #ACs caused by the host having enabled split-lock detection.
6964 * Emulate such instructions.
6965 */
6966#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6967 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6968 AssertRCReturn(rc, rc);
6969 /** @todo detect split lock in cpu feature? */
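    /* A legacy (486-style) alignment-check #AC can only be raised when CR0.AM = 1, CPL = 3 and
       EFLAGS.AC = 1; if any of these conditions does not hold, the #AC must stem from host
       split-lock detection. */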
6970 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6971 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6972 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6973 || CPUMGetGuestCPL(pVCpu) != 3
6974           /* 3. When EFLAGS.AC is clear, a legacy #AC cannot occur, so this can only be a split-lock case. */
6975 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6976 {
6977 /*
6978 * Check for debug/trace events and import state accordingly.
6979 */
6980 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6981 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6982 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6983#ifndef IN_NEM_DARWIN
6984 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6985#endif
6986 )
6987 {
6988 if (pVM->cCpus == 1)
6989 {
6990#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6991 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
6992 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6993#else
6994 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
6995 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6996#endif
6997 AssertRCReturn(rc, rc);
6998 }
6999 }
7000 else
7001 {
7002 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7003 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7004 AssertRCReturn(rc, rc);
7005
7006 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7007
7008 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7009 {
7010 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7011 if (rcStrict != VINF_SUCCESS)
7012 return rcStrict;
7013 }
7014 }
7015
7016 /*
7017 * Emulate the instruction.
7018 *
7019 * We have to ignore the LOCK prefix here as we must not retrigger the
7020 * detection on the host. This isn't all that satisfactory, though...
7021 */
7022 if (pVM->cCpus == 1)
7023 {
7024 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7025 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7026
7027 /** @todo For SMP configs we should do a rendezvous here. */
7028 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7029 if (rcStrict == VINF_SUCCESS)
7030#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7031 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7032 HM_CHANGED_GUEST_RIP
7033 | HM_CHANGED_GUEST_RFLAGS
7034 | HM_CHANGED_GUEST_GPRS_MASK
7035 | HM_CHANGED_GUEST_CS
7036 | HM_CHANGED_GUEST_SS);
7037#else
7038 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7039#endif
7040 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7041 {
7042 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7043 rcStrict = VINF_SUCCESS;
7044 }
7045 return rcStrict;
7046 }
7047 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7048 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7049 return VINF_EM_EMULATE_SPLIT_LOCK;
7050 }
7051
7052 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7053 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7054 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7055
7056 /* Re-inject it. We'll detect any nesting before getting here. */
7057 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7058 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7059 return VINF_SUCCESS;
7060}
7061
7062
7063/**
7064 * VM-exit exception handler for \#DB (Debug exception).
7065 *
7066 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7067 */
7068static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7069{
7070 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7071 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7072
7073 /*
7074 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7075 */
7076 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7077
7078 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7079 uint64_t const uDR6 = X86_DR6_INIT_VAL
7080 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7081 | X86_DR6_BD | X86_DR6_BS));
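    /* The B0..B3, BD and BS bits in the Exit qualification occupy the same bit positions
       as in DR6, so they can simply be OR'ed into a DR6-style value here. */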
7082
7083 int rc;
7084 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7085 if (!pVmxTransient->fIsNestedGuest)
7086 {
7087 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7088
7089 /*
7090 * Prevents stepping twice over the same instruction when the guest is stepping using
7091 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7092 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7093 */
7094 if ( rc == VINF_EM_DBG_STEPPED
7095 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7096 {
7097 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7098 rc = VINF_EM_RAW_GUEST_TRAP;
7099 }
7100 }
7101 else
7102 rc = VINF_EM_RAW_GUEST_TRAP;
7103 Log6Func(("rc=%Rrc\n", rc));
7104 if (rc == VINF_EM_RAW_GUEST_TRAP)
7105 {
7106 /*
7107 * The exception was for the guest. Update DR6, DR7.GD and
7108 * IA32_DEBUGCTL.LBR before forwarding it.
7109 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7110 */
7111#ifndef IN_NEM_DARWIN
7112 VMMRZCallRing3Disable(pVCpu);
7113 HM_DISABLE_PREEMPT(pVCpu);
7114
7115 pCtx->dr[6] &= ~X86_DR6_B_MASK;
7116 pCtx->dr[6] |= uDR6;
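        /* If the guest's debug register state is currently loaded on the CPU, mirror the
           DR6 update into the hardware register as well. */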
7117 if (CPUMIsGuestDebugStateActive(pVCpu))
7118 ASMSetDR6(pCtx->dr[6]);
7119
7120 HM_RESTORE_PREEMPT();
7121 VMMRZCallRing3Enable(pVCpu);
7122#else
7123 /** @todo */
7124#endif
7125
7126 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7127 AssertRCReturn(rc, rc);
7128
7129 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7130 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
7131
7132 /* Paranoia. */
7133 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7134 pCtx->dr[7] |= X86_DR7_RA1_MASK;
7135
7136 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
7137 AssertRC(rc);
7138
7139 /*
7140 * Raise #DB in the guest.
7141 *
7142 * It is important to reflect exactly what the VM-exit gave us (preserving the
7143 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7144 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7145 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7146 *
7147      * Intel re-documented ICEBP/INT1 in May 2018 (it was previously documented as part of
7148      * the Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7149 */
7150 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7151 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7152 return VINF_SUCCESS;
7153 }
7154
7155 /*
7156 * Not a guest trap, must be a hypervisor related debug event then.
7157 * Update DR6 in case someone is interested in it.
7158 */
7159 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7160 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7161 CPUMSetHyperDR6(pVCpu, uDR6);
7162
7163 return rc;
7164}
7165
7166
7167/**
7168 * Hacks its way around the lovely mesa driver's backdoor accesses.
7169 *
7170 * @sa hmR0SvmHandleMesaDrvGp.
7171 */
7172static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7173{
7174 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7175 RT_NOREF(pCtx);
7176
7177 /* For now we'll just skip the instruction. */
7178 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7179}
7180
7181
7182/**
7183 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7184 * backdoor logging w/o checking what it is running inside.
7185 *
7186 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7187 * backdoor port and magic numbers loaded in registers.
7188 *
7189 * @returns true if it is, false if it isn't.
7190 * @sa hmR0SvmIsMesaDrvGp.
7191 */
7192DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7193{
7194 /* 0xed: IN eAX,dx */
7195 uint8_t abInstr[1];
7196 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7197 return false;
7198
7199 /* Check that it is #GP(0). */
7200 if (pVmxTransient->uExitIntErrorCode != 0)
7201 return false;
7202
7203 /* Check magic and port. */
7204 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7205 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
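    /* 0x564d5868 is the VMware backdoor magic 'VMXh' expected in EAX, and 0x5658 ('VX')
       is the backdoor I/O port expected in DX. */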
7206 if (pCtx->rax != UINT32_C(0x564d5868))
7207 return false;
7208 if (pCtx->dx != UINT32_C(0x5658))
7209 return false;
7210
7211 /* Flat ring-3 CS. */
7212 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7213 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7214 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7215 if (pCtx->cs.Attr.n.u2Dpl != 3)
7216 return false;
7217 if (pCtx->cs.u64Base != 0)
7218 return false;
7219
7220 /* Check opcode. */
7221 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7222 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7223 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7224 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7225 if (RT_FAILURE(rc))
7226 return false;
7227 if (abInstr[0] != 0xed)
7228 return false;
7229
7230 return true;
7231}
7232
7233
7234/**
7235 * VM-exit exception handler for \#GP (General-protection exception).
7236 *
7237 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7238 */
7239static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7240{
7241 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7242 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7243
7244 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7245 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7246#ifndef IN_NEM_DARWIN
7247 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7248 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7249 { /* likely */ }
7250 else
7251#endif
7252 {
7253#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7254# ifndef IN_NEM_DARWIN
7255 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7256# else
7257 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7258# endif
7259#endif
7260 /*
7261 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7262 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7263 */
7264 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7265 AssertRCReturn(rc, rc);
7266 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7267 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7268
7269 if ( pVmxTransient->fIsNestedGuest
7270 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7271 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7272 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7273 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7274 else
7275 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7276 return rc;
7277 }
7278
7279#ifndef IN_NEM_DARWIN
7280 Assert(CPUMIsGuestInRealModeEx(pCtx));
7281 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7282 Assert(!pVmxTransient->fIsNestedGuest);
7283
7284 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7285 AssertRCReturn(rc, rc);
7286
7287 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7288 if (rcStrict == VINF_SUCCESS)
7289 {
7290 if (!CPUMIsGuestInRealModeEx(pCtx))
7291 {
7292 /*
7293 * The guest is no longer in real-mode, check if we can continue executing the
7294 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7295 */
7296 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7297 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7298 {
7299 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7301 }
7302 else
7303 {
7304 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7305 rcStrict = VINF_EM_RESCHEDULE;
7306 }
7307 }
7308 else
7309 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7310 }
7311 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7312 {
7313 rcStrict = VINF_SUCCESS;
7314 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7315 }
7316 return VBOXSTRICTRC_VAL(rcStrict);
7317#endif
7318}
7319
7320
7321/**
7322 * VM-exit exception handler for \#DE (Divide Error).
7323 *
7324 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7325 */
7326static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7327{
7328 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7329 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7330
7331 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7332 AssertRCReturn(rc, rc);
7333
7334 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7335 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7336 {
7337 uint8_t cbInstr = 0;
7338 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7339 if (rc2 == VINF_SUCCESS)
7340 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7341 else if (rc2 == VERR_NOT_FOUND)
7342 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7343 else
7344 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7345 }
7346 else
7347 rcStrict = VINF_SUCCESS; /* Do nothing. */
7348
7349 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7350 if (RT_FAILURE(rcStrict))
7351 {
7352 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7353 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7354 rcStrict = VINF_SUCCESS;
7355 }
7356
7357 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7358 return VBOXSTRICTRC_VAL(rcStrict);
7359}
7360
7361
7362/**
7363 * VM-exit exception handler wrapper for all other exceptions that are not handled
7364 * by a specific handler.
7365 *
7366 * This simply re-injects the exception back into the VM without any special
7367 * processing.
7368 *
7369 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7370 */
7371static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7372{
7373 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7374
7375#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7376# ifndef IN_NEM_DARWIN
7377 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7378 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7379 ("uVector=%#x u32XcptBitmap=%#X32\n",
7380 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7381 NOREF(pVmcsInfo);
7382# endif
7383#endif
7384
7385 /*
7386 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7387 * would have been handled while checking exits due to event delivery.
7388 */
7389 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7390
7391#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7392 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7393 AssertRCReturn(rc, rc);
7394 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7395#endif
7396
7397#ifdef VBOX_WITH_STATISTICS
7398 switch (uVector)
7399 {
7400 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7401 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7402 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7403 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7404 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7405 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7406 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7407 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7408 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7409 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7410 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7411 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7412 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7413 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7414 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7415 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7416 default:
7417 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7418 break;
7419 }
7420#endif
7421
7422 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7423 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7424 NOREF(uVector);
7425
7426 /* Re-inject the original exception into the guest. */
7427 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7428 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7429 return VINF_SUCCESS;
7430}
7431
7432
7433/**
7434 * VM-exit exception handler for all exceptions (except NMIs!).
7435 *
7436 * @remarks This may be called for both guests and nested-guests. Take care to not
7437 * make assumptions and avoid doing anything that is not relevant when
7438 * executing a nested-guest (e.g., Mesa driver hacks).
7439 */
7440static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7441{
7442 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7443
7444 /*
7445 * If this VM-exit occurred while delivering an event through the guest IDT, take
7446 * action based on the return code and additional hints (e.g. for page-faults)
7447 * that will be updated in the VMX transient structure.
7448 */
7449 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7450 if (rcStrict == VINF_SUCCESS)
7451 {
7452 /*
7453 * If an exception caused a VM-exit due to delivery of an event, the original
7454 * event may have to be re-injected into the guest. We shall reinject it and
7455 * continue guest execution. However, page-fault is a complicated case and
7456 * needs additional processing done in vmxHCExitXcptPF().
7457 */
7458 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7459 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7460 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7461 || uVector == X86_XCPT_PF)
7462 {
7463 switch (uVector)
7464 {
7465 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7466 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7467 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7468 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7469 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7470 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7471 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7472 default:
7473 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7474 }
7475 }
7476 /* else: inject pending event before resuming guest execution. */
7477 }
7478 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7479 {
7480 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7481 rcStrict = VINF_SUCCESS;
7482 }
7483
7484 return rcStrict;
7485}
7486/** @} */
7487
7488
7489/** @name VM-exit handlers.
7490 * @{
7491 */
7492/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7493/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7494/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7495
7496/**
7497 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7498 */
7499HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7500{
7501 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7502 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7503
7504#ifndef IN_NEM_DARWIN
7505 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7506 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7507 return VINF_SUCCESS;
7508 return VINF_EM_RAW_INTERRUPT;
7509#else
7510 return VINF_SUCCESS;
7511#endif
7512}
7513
7514
7515/**
7516 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7517 * VM-exit.
7518 */
7519HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7520{
7521 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7522 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7523
7524 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7525
7526 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7527 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7528 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7529
7530 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7531 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7532 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7533 NOREF(pVmcsInfo);
7534
7535 VBOXSTRICTRC rcStrict;
7536 switch (uExitIntType)
7537 {
7538#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7539 /*
7540 * Host physical NMIs:
7541 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7542 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7543 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7544 *
7545 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7546 * See Intel spec. 27.5.5 "Updating Non-Register State".
7547 */
7548 case VMX_EXIT_INT_INFO_TYPE_NMI:
7549 {
7550 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7551 break;
7552 }
7553#endif
7554
7555 /*
7556 * Privileged software exceptions (#DB from ICEBP),
7557 * Software exceptions (#BP and #OF),
7558 * Hardware exceptions:
7559 * Process the required exceptions and resume guest execution if possible.
7560 */
7561 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7562 Assert(uVector == X86_XCPT_DB);
7563 RT_FALL_THRU();
7564 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7565 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7566 RT_FALL_THRU();
7567 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7568 {
7569 NOREF(uVector);
7570 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7571 | HMVMX_READ_EXIT_INSTR_LEN
7572 | HMVMX_READ_IDT_VECTORING_INFO
7573 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7574 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7575 break;
7576 }
7577
7578 default:
7579 {
7580 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7581 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7582 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7583 break;
7584 }
7585 }
7586
7587 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7588 return rcStrict;
7589}
7590
7591
7592/**
7593 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7594 */
7595HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7596{
7597 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7598
7599    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7600 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7601 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7602
7603 /* Evaluate and deliver pending events and resume guest execution. */
7604 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7605 return VINF_SUCCESS;
7606}
7607
7608
7609/**
7610 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7611 */
7612HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7613{
7614 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7615
7616 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7617 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7618 {
7619 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7620 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7621 }
7622
7623 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7624
7625 /*
7626 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7627 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7628 */
7629 uint32_t fIntrState;
7630 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7631 AssertRC(rc);
7632 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7633 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7634 {
7635 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7636 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7637
7638 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7639 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7640 AssertRC(rc);
7641 }
7642
7643    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7644 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7645
7646 /* Evaluate and deliver pending events and resume guest execution. */
7647 return VINF_SUCCESS;
7648}
7649
7650
7651/**
7652 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7653 */
7654HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7655{
7656 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7657 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7658}
7659
7660
7661/**
7662 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7663 */
7664HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7665{
7666 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7667 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7668}
7669
7670
7671/**
7672 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7673 */
7674HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7675{
7676 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7677
7678 /*
7679 * Get the state we need and update the exit history entry.
7680 */
7681 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7682 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7683 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7684 AssertRCReturn(rc, rc);
7685
7686 VBOXSTRICTRC rcStrict;
7687 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7688 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7689 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
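    /* A non-NULL exit record means this CPUID exit address is hot (or under probing), so it
       is handed to EMHistoryExec below rather than being emulated one instruction at a time. */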
7690 if (!pExitRec)
7691 {
7692 /*
7693 * Regular CPUID instruction execution.
7694 */
7695 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7696 if (rcStrict == VINF_SUCCESS)
7697 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7698 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7699 {
7700 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7701 rcStrict = VINF_SUCCESS;
7702 }
7703 }
7704 else
7705 {
7706 /*
7707 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7708 */
7709 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7710 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7711 AssertRCReturn(rc2, rc2);
7712
7713 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7714 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7715
7716 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7717 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7718
7719 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7720 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7721 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7722 }
7723 return rcStrict;
7724}
7725
7726
7727/**
7728 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7729 */
7730HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7731{
7732 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7733
7734 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7735 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7736 AssertRCReturn(rc, rc);
7737
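    /* GETSEC raises #UD when CR4.SMXE is clear, so a GETSEC VM-exit is only expected while
       CR4.SMXE is set; in that case simply fall back to instruction emulation. */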
7738 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7739 return VINF_EM_RAW_EMULATE_INSTR;
7740
7741 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7742 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7743}
7744
7745
7746/**
7747 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7748 */
7749HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7750{
7751 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7752
7753 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7754 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7755 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7756 AssertRCReturn(rc, rc);
7757
7758 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7759 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7760 {
7761 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7762 we must reset offsetting on VM-entry. See @bugref{6634}. */
7763 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7764 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7765 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7766 }
7767 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7768 {
7769 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7770 rcStrict = VINF_SUCCESS;
7771 }
7772 return rcStrict;
7773}
7774
7775
7776/**
7777 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7778 */
7779HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7780{
7781 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7782
7783 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7784 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7785 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7786 AssertRCReturn(rc, rc);
7787
7788 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7789 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7790 {
7791 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7792 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7793 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7794 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7795 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7796 }
7797 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7798 {
7799 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7800 rcStrict = VINF_SUCCESS;
7801 }
7802 return rcStrict;
7803}
7804
7805
7806/**
7807 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7808 */
7809HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7810{
7811 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7812
7813 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7814 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
7815 | CPUMCTX_EXTRN_CR0
7816 | CPUMCTX_EXTRN_RFLAGS
7817 | CPUMCTX_EXTRN_RIP
7818 | CPUMCTX_EXTRN_SS>(pVCpu, pVmcsInfo, __FUNCTION__);
7819 AssertRCReturn(rc, rc);
7820
7821 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7822 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7823 if (RT_LIKELY(rc == VINF_SUCCESS))
7824 {
7825 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
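        /* RDPMC encodes as 0F 33, so the exit instruction length is always 2 bytes. */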
7826 Assert(pVmxTransient->cbExitInstr == 2);
7827 }
7828 else
7829 {
7830 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7831 rc = VERR_EM_INTERPRETER;
7832 }
7833 return rc;
7834}
7835
7836
7837/**
7838 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7839 */
7840HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7841{
7842 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7843
7844 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7845 if (EMAreHypercallInstructionsEnabled(pVCpu))
7846 {
7847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7848 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7849 | CPUMCTX_EXTRN_RFLAGS
7850 | CPUMCTX_EXTRN_CR0
7851 | CPUMCTX_EXTRN_SS
7852 | CPUMCTX_EXTRN_CS
7853 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7854 AssertRCReturn(rc, rc);
7855
7856 /* Perform the hypercall. */
7857 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7858 if (rcStrict == VINF_SUCCESS)
7859 {
7860 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7861 AssertRCReturn(rc, rc);
7862 }
7863 else
7864 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7865 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7866 || RT_FAILURE(rcStrict));
7867
7868 /* If the hypercall changes anything other than guest's general-purpose registers,
7869 we would need to reload the guest changed bits here before VM-entry. */
7870 }
7871 else
7872 Log4Func(("Hypercalls not enabled\n"));
7873
7874 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7875 if (RT_FAILURE(rcStrict))
7876 {
7877 vmxHCSetPendingXcptUD(pVCpu);
7878 rcStrict = VINF_SUCCESS;
7879 }
7880
7881 return rcStrict;
7882}
7883
7884
7885/**
7886 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7887 */
7888HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7889{
7890 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7891#ifndef IN_NEM_DARWIN
7892 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7893#endif
7894
7895 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7896 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7897 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7898 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7899 AssertRCReturn(rc, rc);
7900
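    /* For INVLPG VM-exits the Exit qualification holds the linear-address operand of the instruction. */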
7901 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7902
7903 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7904 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7905 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7906 {
7907 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7908 rcStrict = VINF_SUCCESS;
7909 }
7910 else
7911 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7912 VBOXSTRICTRC_VAL(rcStrict)));
7913 return rcStrict;
7914}
7915
7916
7917/**
7918 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7919 */
7920HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7921{
7922 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7923
7924 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7925 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7926 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7927 AssertRCReturn(rc, rc);
7928
7929 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7930 if (rcStrict == VINF_SUCCESS)
7931 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7932 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7933 {
7934 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7935 rcStrict = VINF_SUCCESS;
7936 }
7937
7938 return rcStrict;
7939}
7940
7941
7942/**
7943 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7944 */
7945HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7946{
7947 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7948
7949 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7950 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7951 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7952 AssertRCReturn(rc, rc);
7953
7954 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7955 if (RT_SUCCESS(rcStrict))
7956 {
7957 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7958 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7959 rcStrict = VINF_SUCCESS;
7960 }
7961
7962 return rcStrict;
7963}
7964
7965
7966/**
7967 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7968 * VM-exit.
7969 */
7970HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7971{
7972 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7973 return VINF_EM_RESET;
7974}
7975
7976
7977/**
7978 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7979 */
7980HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7981{
7982 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7983
7984 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7985 AssertRCReturn(rc, rc);
7986
7987 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7988 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7989 rc = VINF_SUCCESS;
7990 else
7991 rc = VINF_EM_HALT;
7992
7993 if (rc != VINF_SUCCESS)
7994 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7995 return rc;
7996}
7997
7998
7999#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8000/**
8001 * VM-exit handler for instructions that result in a \#UD exception delivered to
8002 * the guest.
8003 */
8004HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8005{
8006 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8007 vmxHCSetPendingXcptUD(pVCpu);
8008 return VINF_SUCCESS;
8009}
8010#endif
8011
8012
8013/**
8014 * VM-exit handler for expiry of the VMX-preemption timer.
8015 */
8016HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8017{
8018 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8019
8020 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8021 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8022    Log12(("vmxHCExitPreemptTimer:\n"));
8023
8024 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8025 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8026 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8027 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8028 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8029}
8030
8031
8032/**
8033 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8034 */
8035HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8036{
8037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8038
8039 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8040 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8041 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8042 AssertRCReturn(rc, rc);
8043
8044 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8045 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8046 : HM_CHANGED_RAISED_XCPT_MASK);
8047
8048#ifndef IN_NEM_DARWIN
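    /* Recompute whether XCR0 must be loaded/saved around VM-entry (guest XCR0 differs from
       the host's); if that changed, update the start-VM function selection accordingly. */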
8049 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8050 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8051 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8052 {
8053 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8054 hmR0VmxUpdateStartVmFunction(pVCpu);
8055 }
8056#endif
8057
8058 return rcStrict;
8059}
8060
8061
8062/**
8063 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8064 */
8065HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8066{
8067 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8068
8069    /** @todo Enable the new code after finding a reliable guest test-case. */
8070#if 1
8071 return VERR_EM_INTERPRETER;
8072#else
8073 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8074 | HMVMX_READ_EXIT_INSTR_INFO
8075 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8076 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8077 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8078 AssertRCReturn(rc, rc);
8079
8080 /* Paranoia. Ensure this has a memory operand. */
8081 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8082
8083 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8084 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8085 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8086 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8087
8088 RTGCPTR GCPtrDesc;
8089 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8090
8091 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8092 GCPtrDesc, uType);
8093 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8094 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8095 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8096 {
8097 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8098 rcStrict = VINF_SUCCESS;
8099 }
8100 return rcStrict;
8101#endif
8102}
8103
8104
8105/**
8106 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8107 * VM-exit.
8108 */
8109HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8110{
8111 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8112 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8113 AssertRCReturn(rc, rc);
8114
8115 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8116 if (RT_FAILURE(rc))
8117 return rc;
8118
8119 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8120 NOREF(uInvalidReason);
8121
8122#ifdef VBOX_STRICT
8123 uint32_t fIntrState;
8124 uint64_t u64Val;
8125 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8126 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8127 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8128
8129 Log4(("uInvalidReason %u\n", uInvalidReason));
8130 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8131 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8132 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8133
8134 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8135 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8136 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8137 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8138 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8139 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8140 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8141    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8142 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8143 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8144 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8145 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8146# ifndef IN_NEM_DARWIN
8147 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8148 {
8149 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8150 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8151 }
8152
8153 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8154# endif
8155#endif
8156
8157 return VERR_VMX_INVALID_GUEST_STATE;
8158}
8159
8160/**
8161 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8162 */
8163HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8164{
8165 /*
8166 * Cumulative notes of all recognized but unexpected VM-exits.
8167 *
8168 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8169 * nested-paging is used.
8170 *
8171     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8172     *    emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8173     *    this function (thereby stopping VM execution) for handling such instructions.
8174 *
8175 *
8176 * VMX_EXIT_INIT_SIGNAL:
8177 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8178 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8179     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8180 *
8181     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8182 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8183 * See Intel spec. "23.8 Restrictions on VMX operation".
8184 *
8185 * VMX_EXIT_SIPI:
8186 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8187 * activity state is used. We don't make use of it as our guests don't have direct
8188 * access to the host local APIC.
8189 *
8190 * See Intel spec. 25.3 "Other Causes of VM-exits".
8191 *
8192 * VMX_EXIT_IO_SMI:
8193 * VMX_EXIT_SMI:
8194 * This can only happen if we support dual-monitor treatment of SMI, which can be
8195 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8196 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8197 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8198 *
8199 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8200 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8201 *
8202 * VMX_EXIT_ERR_MSR_LOAD:
8203     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are
8204     *    unexpected and typically indicate a bug in the hypervisor code. We thus cannot
8205     *    resume execution.
8206 *
8207 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8208 *
8209 * VMX_EXIT_ERR_MACHINE_CHECK:
8210     *    A machine-check exception indicates a fatal/unrecoverable hardware condition,
8211     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8212     *    abort-class #MC exception is raised. We thus cannot assume a reasonable chance
8213     *    of continuing any sort of execution and we bail.
8214 *
8215 * See Intel spec. 15.1 "Machine-check Architecture".
8216 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8217 *
8218 * VMX_EXIT_PML_FULL:
8219 * VMX_EXIT_VIRTUALIZED_EOI:
8220 * VMX_EXIT_APIC_WRITE:
8221 * We do not currently support any of these features and thus they are all unexpected
8222 * VM-exits.
8223 *
8224 * VMX_EXIT_GDTR_IDTR_ACCESS:
8225 * VMX_EXIT_LDTR_TR_ACCESS:
8226 * VMX_EXIT_RDRAND:
8227 * VMX_EXIT_RSM:
8228 * VMX_EXIT_VMFUNC:
8229 * VMX_EXIT_ENCLS:
8230 * VMX_EXIT_RDSEED:
8231 * VMX_EXIT_XSAVES:
8232 * VMX_EXIT_XRSTORS:
8233 * VMX_EXIT_UMWAIT:
8234 * VMX_EXIT_TPAUSE:
8235 * VMX_EXIT_LOADIWKEY:
8236 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8237     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8238 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8239 *
8240 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8241 */
8242 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8243 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8244 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8245}
8246
8247
8248/**
8249 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8250 */
8251HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8252{
8253 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8254
8255 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8256
8257 /** @todo Optimize this: We currently drag in the whole MSR state
8258 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8259 * MSRs required. That would require changes to IEM and possibly CPUM too.
8260 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8261 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8262 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8263 int rc;
8264 switch (idMsr)
8265 {
8266 default:
8267 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8268 __FUNCTION__);
8269 AssertRCReturn(rc, rc);
8270 break;
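        /* For FS/GS base reads the segment register must be imported too, as the base value
           lives in the VMCS guest segment state rather than in the MSR auto-load/store area. */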
8271 case MSR_K8_FS_BASE:
8272 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8273 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8274 AssertRCReturn(rc, rc);
8275 break;
8276 case MSR_K8_GS_BASE:
8277 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8278 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8279 AssertRCReturn(rc, rc);
8280 break;
8281 }
8282
8283 Log4Func(("ecx=%#RX32\n", idMsr));
8284
8285#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8286 Assert(!pVmxTransient->fIsNestedGuest);
8287 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8288 {
8289 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8290 && idMsr != MSR_K6_EFER)
8291 {
8292 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8293 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8294 }
8295 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8296 {
8297 Assert(pVmcsInfo->pvMsrBitmap);
8298 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8299 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8300 {
8301 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8302 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8303 }
8304 }
8305 }
8306#endif
8307
8308 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8309 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8310 if (rcStrict == VINF_SUCCESS)
8311 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8312 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8313 {
8314 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8315 rcStrict = VINF_SUCCESS;
8316 }
8317 else
8318 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8319 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8320
8321 return rcStrict;
8322}
8323
8324
8325/**
8326 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8327 */
8328HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8329{
8330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8331
8332 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8333
8334 /*
8335 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8336     * Although we don't need to fetch the base (it will be overwritten shortly), loading
8337     * the guest state would also load the entire segment register, including its limit and
8338     * attributes, and thus we need to import them here.
8339 */
8340 /** @todo Optimize this: We currently drag in the whole MSR state
8341 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8342 * MSRs required. That would require changes to IEM and possibly CPUM too.
8343 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8344 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8345 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8346 int rc;
8347 switch (idMsr)
8348 {
8349 default:
8350 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8351 __FUNCTION__);
8352 AssertRCReturn(rc, rc);
8353 break;
8354
8355 case MSR_K8_FS_BASE:
8356 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8357 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8358 AssertRCReturn(rc, rc);
8359 break;
8360 case MSR_K8_GS_BASE:
8361 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8362 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8363 AssertRCReturn(rc, rc);
8364 break;
8365 }
8366 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8367
8368 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8369 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8370
8371 if (rcStrict == VINF_SUCCESS)
8372 {
8373 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8374
8375 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8376 if ( idMsr == MSR_IA32_APICBASE
8377 || ( idMsr >= MSR_IA32_X2APIC_START
8378 && idMsr <= MSR_IA32_X2APIC_END))
8379 {
8380 /*
8381 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8382 * When full APIC register virtualization is implemented we'll have to make
8383 * sure APIC state is saved from the VMCS before IEM changes it.
8384 */
8385 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8386 }
8387 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8388 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8389 else if (idMsr == MSR_K6_EFER)
8390 {
8391 /*
8392 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8393 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8394 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8395 */
8396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8397 }
8398
8399 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8400 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8401 {
8402 switch (idMsr)
8403 {
8404 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8405 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8406 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8407 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8408 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8409 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8410 default:
8411 {
8412#ifndef IN_NEM_DARWIN
8413 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8414 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8415 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8416 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8417#else
8418 AssertMsgFailed(("TODO\n"));
8419#endif
8420 break;
8421 }
8422 }
8423 }
8424#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8425 else
8426 {
8427 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8428 switch (idMsr)
8429 {
8430 case MSR_IA32_SYSENTER_CS:
8431 case MSR_IA32_SYSENTER_EIP:
8432 case MSR_IA32_SYSENTER_ESP:
8433 case MSR_K8_FS_BASE:
8434 case MSR_K8_GS_BASE:
8435 {
8436 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8437 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8438 }
8439
8440 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8441 default:
8442 {
8443 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8444 {
8445 /* EFER MSR writes are always intercepted. */
8446 if (idMsr != MSR_K6_EFER)
8447 {
8448 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8449 idMsr));
8450 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8451 }
8452 }
8453
8454 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8455 {
8456 Assert(pVmcsInfo->pvMsrBitmap);
8457 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8458 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8459 {
8460 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8461 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8462 }
8463 }
8464 break;
8465 }
8466 }
8467 }
8468#endif /* VBOX_STRICT */
8469 }
8470 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8471 {
8472 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8473 rcStrict = VINF_SUCCESS;
8474 }
8475 else
8476 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8477 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8478
8479 return rcStrict;
8480}
8481
8482
8483/**
8484 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8485 */
8486HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8487{
8488 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8489
8490 /** @todo The guest has likely hit a contended spinlock. We might want to
8491     *        poke or schedule a different guest VCPU. */
8492 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8493 if (RT_SUCCESS(rc))
8494 return VINF_EM_RAW_INTERRUPT;
8495
8496 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8497 return rc;
8498}
8499
8500
8501/**
8502 * VM-exit handler for when the TPR value is lowered below the specified
8503 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8504 */
8505HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8506{
8507 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8508 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8509
8510 /*
8511 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8512 * We'll re-evaluate pending interrupts and inject them before the next VM
8513 * entry so we can just continue execution here.
8514 */
8515 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8516 return VINF_SUCCESS;
8517}
8518
8519
8520/**
8521 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8522 * VM-exit.
8523 *
8524 * @retval VINF_SUCCESS when guest execution can continue.
8525 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8526 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8527 * incompatible guest state for VMX execution (real-on-v86 case).
8528 */
8529HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8530{
8531 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8532 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8533
8534 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8535 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8536 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8537
8538 VBOXSTRICTRC rcStrict;
8539 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8540 uint64_t const uExitQual = pVmxTransient->uExitQual;
8541 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
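    /* Access types in the exit qualification (Intel spec. "Exit Qualification for Control-Register Accesses"):
       0 = MOV to CRx, 1 = MOV from CRx, 2 = CLTS, 3 = LMSW, matching the cases below. */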
8542 switch (uAccessType)
8543 {
8544 /*
8545 * MOV to CRx.
8546 */
8547 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8548 {
8549 /*
8550 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8551 * changes certain bits in CR0 or CR4 (and not just CR3). We are currently fine
8552 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8553 * PAE PDPTEs as well.
8554 */
8555 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8556 AssertRCReturn(rc, rc);
8557
8558 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8559#ifndef IN_NEM_DARWIN
8560 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8561#endif
8562 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8563 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8564
8565 /*
8566 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8567 * - When nested paging isn't used.
8568 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8569 * - We are executing in the VM debug loop.
8570 */
8571#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8572# ifndef IN_NEM_DARWIN
8573 Assert( iCrReg != 3
8574 || !VM_IS_VMX_NESTED_PAGING(pVM)
8575 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8576 || pVCpu->hmr0.s.fUsingDebugLoop);
8577# else
8578 Assert( iCrReg != 3
8579 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8580# endif
8581#endif
8582
8583 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8584 Assert( iCrReg != 8
8585 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8586
8587 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8588 AssertMsg( rcStrict == VINF_SUCCESS
8589 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8590
8591#ifndef IN_NEM_DARWIN
8592 /*
8593 * This is a kludge for handling switches back to real mode when we try to use
8594 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8595 * deal with special selector values, so we have to return to ring-3 and run
8596 * there till the selector values are V86 mode compatible.
8597 *
8598 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8599 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8600 * this function.
8601 */
8602 if ( iCrReg == 0
8603 && rcStrict == VINF_SUCCESS
8604 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8605 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8606 && (uOldCr0 & X86_CR0_PE)
8607 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8608 {
8609 /** @todo Check selectors rather than returning all the time. */
8610 Assert(!pVmxTransient->fIsNestedGuest);
8611 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8612 rcStrict = VINF_EM_RESCHEDULE_REM;
8613 }
8614#endif
8615
8616 break;
8617 }
8618
8619 /*
8620 * MOV from CRx.
8621 */
8622 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8623 {
8624 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8625 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8626
8627 /*
8628 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8629 * - When nested paging isn't used.
8630 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8631 * - We are executing in the VM debug loop.
8632 */
8633#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8634# ifndef IN_NEM_DARWIN
8635 Assert( iCrReg != 3
8636 || !VM_IS_VMX_NESTED_PAGING(pVM)
8637 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8638 || pVCpu->hmr0.s.fLeaveDone);
8639# else
8640 Assert( iCrReg != 3
8641 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8642# endif
8643#endif
8644
8645 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8646 Assert( iCrReg != 8
8647 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8648
8649 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8650 break;
8651 }
8652
8653 /*
8654 * CLTS (Clear Task-Switch Flag in CR0).
8655 */
8656 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8657 {
8658 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8659 break;
8660 }
8661
8662 /*
8663 * LMSW (Load Machine-Status Word into CR0).
8664 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8665 */
8666 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8667 {
8668 RTGCPTR GCPtrEffDst;
8669 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8670 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8671 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
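            /* For the memory-operand form of LMSW the VMCS provides the guest-linear address of the operand. */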
8672 if (fMemOperand)
8673 {
8674 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8675 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8676 }
8677 else
8678 GCPtrEffDst = NIL_RTGCPTR;
8679 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8680 break;
8681 }
8682
8683 default:
8684 {
8685 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8686 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8687 }
8688 }
8689
8690 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8691 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8692 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8693
8694 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8695 NOREF(pVM);
8696 return rcStrict;
8697}
8698
8699
8700/**
8701 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8702 * VM-exit.
8703 */
8704HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8705{
8706 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8707 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8708
8709 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8710 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8711 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8712 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8713#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8714 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8715 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8716 AssertRCReturn(rc, rc);
8717
8718 /* Refer to Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8719 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8720 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8721 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8722 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8723 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8724 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
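    /* The I/O size field encodes 0 for 1-byte, 1 for 2-byte and 3 for 4-byte accesses; 2 is not a valid
       encoding (see the s_aIOSizes table below). */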
8725 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8726
8727 /*
8728 * Update exit history to see if this exit can be optimized.
8729 */
8730 VBOXSTRICTRC rcStrict;
8731 PCEMEXITREC pExitRec = NULL;
8732 if ( !fGstStepping
8733 && !fDbgStepping)
8734 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8735 !fIOString
8736 ? !fIOWrite
8737 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8738 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8739 : !fIOWrite
8740 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8741 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8742 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8743 if (!pExitRec)
8744 {
8745 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8746 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8747
8748 uint32_t const cbValue = s_aIOSizes[uIOSize];
8749 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8750 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8751 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8752 if (fIOString)
8753 {
8754 /*
8755 * INS/OUTS - I/O String instruction.
8756 *
8757 * Use instruction-information if available, otherwise fall back on
8758 * interpreting the instruction.
8759 */
8760 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8761 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
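            /* Bit 54 of IA32_VMX_BASIC indicates whether the CPU provides VM-exit instruction-information for INS/OUTS. */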
8762 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8763 if (fInsOutsInfo)
8764 {
8765 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8766 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8767 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8768 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8769 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8770 if (fIOWrite)
8771 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8772 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8773 else
8774 {
8775 /*
8776 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8777 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8778 * See Intel Instruction spec. for "INS".
8779 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8780 */
8781 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8782 }
8783 }
8784 else
8785 rcStrict = IEMExecOne(pVCpu);
8786
8787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8788 fUpdateRipAlready = true;
8789 }
8790 else
8791 {
8792 /*
8793 * IN/OUT - I/O instruction.
8794 */
8795 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8796 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8797 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8798 if (fIOWrite)
8799 {
8800 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8801 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8802#ifndef IN_NEM_DARWIN
8803 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8804 && !pCtx->eflags.Bits.u1TF)
8805 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8806#endif
8807 }
8808 else
8809 {
8810 uint32_t u32Result = 0;
8811 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8812 if (IOM_SUCCESS(rcStrict))
8813 {
8814 /* Save result of I/O IN instr. in AL/AX/EAX. */
8815 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8816 }
8817#ifndef IN_NEM_DARWIN
8818 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8819 && !pCtx->eflags.Bits.u1TF)
8820 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8821#endif
8822 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8823 }
8824 }
8825
8826 if (IOM_SUCCESS(rcStrict))
8827 {
8828 if (!fUpdateRipAlready)
8829 {
8830 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8831 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8832 }
8833
8834 /*
8835 * INS/OUTS with a REP prefix updates RFLAGS; this could be observed as a triple-fault guru
8836 * meditation while booting a Fedora 17 64-bit guest.
8837 *
8838 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8839 */
8840 if (fIOString)
8841 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8842
8843 /*
8844 * If any I/O breakpoints are armed, we need to check if one triggered
8845 * and take appropriate action.
8846 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8847 */
8848#if 1
8849 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8850#else
8851 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8852 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8853 AssertRCReturn(rc, rc);
8854#endif
8855
8856 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8857 * execution engines about whether hyper BPs and such are pending. */
8858 uint32_t const uDr7 = pCtx->dr[7];
8859 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8860 && X86_DR7_ANY_RW_IO(uDr7)
8861 && (pCtx->cr4 & X86_CR4_DE))
8862 || DBGFBpIsHwIoArmed(pVM)))
8863 {
8864 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8865
8866#ifndef IN_NEM_DARWIN
8867 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8868 VMMRZCallRing3Disable(pVCpu);
8869 HM_DISABLE_PREEMPT(pVCpu);
8870
8871 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8872
8873 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8874 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8875 {
8876 /* Raise #DB. */
8877 if (fIsGuestDbgActive)
8878 ASMSetDR6(pCtx->dr[6]);
8879 if (pCtx->dr[7] != uDr7)
8880 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8881
8882 vmxHCSetPendingXcptDB(pVCpu);
8883 }
8884 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
8885 however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8886 else if ( rcStrict2 != VINF_SUCCESS
8887 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8888 rcStrict = rcStrict2;
8889 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8890
8891 HM_RESTORE_PREEMPT();
8892 VMMRZCallRing3Enable(pVCpu);
8893#else
8894 /** @todo */
8895#endif
8896 }
8897 }
8898
8899#ifdef VBOX_STRICT
8900 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8901 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8902 Assert(!fIOWrite);
8903 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8904 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8905 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8906 Assert(fIOWrite);
8907 else
8908 {
8909# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8910 * statuses that the VMM device and some others may return. See
8911 * IOM_SUCCESS() for guidance. */
8912 AssertMsg( RT_FAILURE(rcStrict)
8913 || rcStrict == VINF_SUCCESS
8914 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8915 || rcStrict == VINF_EM_DBG_BREAKPOINT
8916 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8917 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8918# endif
8919 }
8920#endif
8921 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8922 }
8923 else
8924 {
8925 /*
8926 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8927 */
8928 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8929 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8930 AssertRCReturn(rc2, rc2);
8931 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8932 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8933 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8934 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8935 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8936 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8937
8938 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8939 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8940
8941 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8942 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8943 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8944 }
8945 return rcStrict;
8946}
8947
8948
8949/**
8950 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8951 * VM-exit.
8952 */
8953HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8954{
8955 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8956
8957 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8958 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8959 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8960 {
8961 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8962 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8963 {
8964 uint32_t uErrCode;
8965 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8966 {
8967 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8968 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8969 }
8970 else
8971 uErrCode = 0;
8972
8973 RTGCUINTPTR GCPtrFaultAddress;
8974 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8975 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8976 else
8977 GCPtrFaultAddress = 0;
8978
8979 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8980
8981 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8982 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8983
8984 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8985 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8986 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8987 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8988 }
8989 }
8990
8991 /* Fall back to the interpreter to emulate the task-switch. */
8992 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8993 return VERR_EM_INTERPRETER;
8994}
8995
8996
8997/**
8998 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8999 */
9000HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9001{
9002 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9003
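    /* The monitor trap flag causes a VM-exit after executing a single guest instruction; clear it again
       and report the completed step to the debugger. */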
9004 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9005 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9007 AssertRC(rc);
9008 return VINF_EM_DBG_STEPPED;
9009}
9010
9011
9012/**
9013 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9014 */
9015HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9016{
9017 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9018 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9019
9020 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9021 | HMVMX_READ_EXIT_INSTR_LEN
9022 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9023 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9024 | HMVMX_READ_IDT_VECTORING_INFO
9025 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9026
9027 /*
9028 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9029 */
9030 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9032 {
9033 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9034 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9035 {
9036 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9037 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9038 }
9039 }
9040 else
9041 {
9042 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9043 return rcStrict;
9044 }
9045
9046 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9047 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9048 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9049 AssertRCReturn(rc, rc);
9050
9051 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9052 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9053 switch (uAccessType)
9054 {
9055#ifndef IN_NEM_DARWIN
9056 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9057 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9058 {
9059 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9060 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9061 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9062
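            /* Reconstruct the faulting guest-physical address from the APIC-base page and the access offset
               given in the exit qualification. */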
9063 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9064 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9065 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9066 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9067 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9068
9069 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9070 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9071 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9072 if ( rcStrict == VINF_SUCCESS
9073 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9074 || rcStrict == VERR_PAGE_NOT_PRESENT)
9075 {
9076 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9077 | HM_CHANGED_GUEST_APIC_TPR);
9078 rcStrict = VINF_SUCCESS;
9079 }
9080 break;
9081 }
9082#else
9083 /** @todo */
9084#endif
9085
9086 default:
9087 {
9088 Log4Func(("uAccessType=%#x\n", uAccessType));
9089 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9090 break;
9091 }
9092 }
9093
9094 if (rcStrict != VINF_SUCCESS)
9095 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9096 return rcStrict;
9097}
9098
9099
9100/**
9101 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9102 * VM-exit.
9103 */
9104HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9105{
9106 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9107 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9108
9109 /*
9110 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9111 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9112 * must emulate the MOV DRx access.
9113 */
9114 if (!pVmxTransient->fIsNestedGuest)
9115 {
9116 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9117 if (pVmxTransient->fWasGuestDebugStateActive)
9118 {
9119 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9120 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9121 }
9122
9123 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9124 && !pVmxTransient->fWasHyperDebugStateActive)
9125 {
9126 Assert(!DBGFIsStepping(pVCpu));
9127 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9128
9129 /* Don't intercept MOV DRx any more. */
9130 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9131 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9132 AssertRC(rc);
9133
9134#ifndef IN_NEM_DARWIN
9135 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9136 VMMRZCallRing3Disable(pVCpu);
9137 HM_DISABLE_PREEMPT(pVCpu);
9138
9139 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9140 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9141 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9142
9143 HM_RESTORE_PREEMPT();
9144 VMMRZCallRing3Enable(pVCpu);
9145#else
9146 CPUMR3NemActivateGuestDebugState(pVCpu);
9147 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9148 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9149#endif
9150
9151#ifdef VBOX_WITH_STATISTICS
9152 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9153 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9154 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9155 else
9156 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9157#endif
9158 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9159 return VINF_SUCCESS;
9160 }
9161 }
9162
9163 /*
9164 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
9165 * The EFER MSR is always up-to-date.
9166 * Update the segment registers and DR7 from the CPU.
9167 */
9168 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9169 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9170 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9171 AssertRCReturn(rc, rc);
9172 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
9173
9174 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9175 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9176 {
9177 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9178 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
9179 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
9180 if (RT_SUCCESS(rc))
9181 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
9182 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9183 }
9184 else
9185 {
9186 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9187 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
9188 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
9189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9190 }
9191
9192 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9193 if (RT_SUCCESS(rc))
9194 {
9195 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
9196 AssertRCReturn(rc2, rc2);
9197 return VINF_SUCCESS;
9198 }
9199 return rc;
9200}
9201
9202
9203/**
9204 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9205 * Conditional VM-exit.
9206 */
9207HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9208{
9209 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9210
9211#ifndef IN_NEM_DARWIN
9212 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9213
9214 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9215 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9216 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9217 | HMVMX_READ_IDT_VECTORING_INFO
9218 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9219 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9220
9221 /*
9222 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9223 */
9224 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9225 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9226 {
9227 /*
9228 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9229 * instruction emulation to inject the original event. Otherwise, injecting the original event
9230 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9231 */
9232 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9233 { /* likely */ }
9234 else
9235 {
9236 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9237# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9238 /** @todo NSTVMX: Think about how this should be handled. */
9239 if (pVmxTransient->fIsNestedGuest)
9240 return VERR_VMX_IPE_3;
9241# endif
9242 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9243 }
9244 }
9245 else
9246 {
9247 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9248 return rcStrict;
9249 }
9250
9251 /*
9252 * Get sufficient state and update the exit history entry.
9253 */
9254 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9255 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9256 AssertRCReturn(rc, rc);
9257
9258 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9259 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9260 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9261 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9262 if (!pExitRec)
9263 {
9264 /*
9265 * If we succeed, resume guest execution.
9266 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9267 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9268 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9269 * weird case. See @bugref{6043}.
9270 */
9271 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9272 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9273/** @todo bird: We can probably just go straight to IOM here and assume that
9274 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9275 * well. However, we need to address the aliasing workarounds that
9276 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9277 *
9278 * Might also be interesting to see if we can get this done more or
9279 * less locklessly inside IOM. Need to consider the lookup table
9280 * updating and use a bit more carefully first (or do all updates via
9281 * rendezvous) */
9282 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
9283 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
9284 if ( rcStrict == VINF_SUCCESS
9285 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9286 || rcStrict == VERR_PAGE_NOT_PRESENT)
9287 {
9288 /* Successfully handled MMIO operation. */
9289 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9290 | HM_CHANGED_GUEST_APIC_TPR);
9291 rcStrict = VINF_SUCCESS;
9292 }
9293 }
9294 else
9295 {
9296 /*
9297 * Frequent exit or something needing probing. Call EMHistoryExec.
9298 */
9299 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9300 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9301
9302 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9303 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9304
9305 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9306 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9307 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9308 }
9309 return rcStrict;
9310#else
9311 AssertFailed();
9312 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9313#endif
9314}
9315
9316
9317/**
9318 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9319 * VM-exit.
9320 */
9321HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9322{
9323 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9324#ifndef IN_NEM_DARWIN
9325 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9326
9327 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9328 | HMVMX_READ_EXIT_INSTR_LEN
9329 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9330 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9331 | HMVMX_READ_IDT_VECTORING_INFO
9332 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9333 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9334
9335 /*
9336 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9337 */
9338 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9339 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9340 {
9341 /*
9342 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9343 * we shall resolve the nested #PF and re-inject the original event.
9344 */
9345 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9346 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9347 }
9348 else
9349 {
9350 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9351 return rcStrict;
9352 }
9353
9354 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9355 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9356 AssertRCReturn(rc, rc);
9357
9358 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9359 uint64_t const uExitQual = pVmxTransient->uExitQual;
9360 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9361
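    /* Synthesize an x86 page-fault error code from the EPT violation qualification: the access-type bits map to
       the ID/RW bits, and the EPT-permission bits tell us whether the translation was present at all. */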
9362 RTGCUINT uErrorCode = 0;
9363 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9364 uErrorCode |= X86_TRAP_PF_ID;
9365 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9366 uErrorCode |= X86_TRAP_PF_RW;
9367 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9368 uErrorCode |= X86_TRAP_PF_P;
9369
9370 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9371 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9372
9373 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9374
9375 /*
9376 * Handle the pagefault trap for the nested shadow table.
9377 */
9378 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9379 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
9380 TRPMResetTrap(pVCpu);
9381
9382 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9383 if ( rcStrict == VINF_SUCCESS
9384 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9385 || rcStrict == VERR_PAGE_NOT_PRESENT)
9386 {
9387 /* Successfully synced our nested page tables. */
9388 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9389 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9390 return VINF_SUCCESS;
9391 }
9392 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9393 return rcStrict;
9394
9395#else /* IN_NEM_DARWIN */
9396 PVM pVM = pVCpu->CTX_SUFF(pVM);
9397 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9398 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9399 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9400 vmxHCImportGuestRip(pVCpu);
9401 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9402
9403 /*
9404 * Ask PGM for information about the given GCPhys. We need to check if we're
9405 * out of sync first.
9406 */
9407 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
9408 PGMPHYSNEMPAGEINFO Info;
9409 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9410 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9411 if (RT_SUCCESS(rc))
9412 {
9413 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9414 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9415 {
9416 if (State.fCanResume)
9417 {
9418 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9419 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9420 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9421 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9422 State.fDidSomething ? "" : " no-change"));
9423 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9424 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9425 return VINF_SUCCESS;
9426 }
9427 }
9428
9429 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9430 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9431 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9432 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9433 State.fDidSomething ? "" : " no-change"));
9434 }
9435 else
9436 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9437 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9438 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9439
9440 /*
9441 * Emulate the memory access, either access handler or special memory.
9442 */
9443 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9444 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9445 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9446 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9447 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9448
9449 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9450 AssertRCReturn(rc, rc);
9451
9452 VBOXSTRICTRC rcStrict;
9453 if (!pExitRec)
9454 rcStrict = IEMExecOne(pVCpu);
9455 else
9456 {
9457 /* Frequent access or probing. */
9458 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9459 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9460 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9461 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9462 }
9463
9464 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9465
9466 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9467 return rcStrict;
9468#endif /* IN_NEM_DARWIN */
9469}
9470
9471#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9472
9473/**
9474 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9475 */
9476HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9477{
9478 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9479
9480 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9481 | HMVMX_READ_EXIT_INSTR_INFO
9482 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9483 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9484 | CPUMCTX_EXTRN_SREG_MASK
9485 | CPUMCTX_EXTRN_HWVIRT
9486 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9487 AssertRCReturn(rc, rc);
9488
9489 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9490
9491 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9492 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9493
9494 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9495 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9496 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9497 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9498 {
9499 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9500 rcStrict = VINF_SUCCESS;
9501 }
9502 return rcStrict;
9503}
9504
9505
9506/**
9507 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9508 */
9509HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9510{
9511 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9512
9513 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9514 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9515 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9516 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9517 AssertRCReturn(rc, rc);
9518
9519 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9520
9521 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9522 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9523 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9524 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9525 {
9526 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
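        /* If IEM switched the CPU into VMX non-root operation, inform the caller so it can start executing the nested-guest. */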
9527 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9528 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9529 }
9530 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9531 return rcStrict;
9532}
9533
9534
9535/**
9536 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9537 */
9538HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9539{
9540 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9541
9542 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9543 | HMVMX_READ_EXIT_INSTR_INFO
9544 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9545 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9546 | CPUMCTX_EXTRN_SREG_MASK
9547 | CPUMCTX_EXTRN_HWVIRT
9548 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9549 AssertRCReturn(rc, rc);
9550
9551 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9552
9553 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9554 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9555
9556 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9558 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9559 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9560 {
9561 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9562 rcStrict = VINF_SUCCESS;
9563 }
9564 return rcStrict;
9565}
9566
9567
9568/**
9569 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9570 */
9571HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9572{
9573 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9574
9575 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9576 | HMVMX_READ_EXIT_INSTR_INFO
9577 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9578 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9579 | CPUMCTX_EXTRN_SREG_MASK
9580 | CPUMCTX_EXTRN_HWVIRT
9581 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9582 AssertRCReturn(rc, rc);
9583
9584 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9585
9586 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9587 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9588
9589 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9590 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9591 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9592 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9593 {
9594 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9595 rcStrict = VINF_SUCCESS;
9596 }
9597 return rcStrict;
9598}
9599
9600
9601/**
9602 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9603 */
9604HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9605{
9606 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9607
9608 /*
9609 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
8610 * thus might not need to import the shadow VMCS state, but it's safer to do so in case
8611 * code elsewhere dares to look at unsynced VMCS fields.
9612 */
9613 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9614 | HMVMX_READ_EXIT_INSTR_INFO
9615 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9616 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9617 | CPUMCTX_EXTRN_SREG_MASK
9618 | CPUMCTX_EXTRN_HWVIRT
9619 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9620 AssertRCReturn(rc, rc);
9621
9622 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9623
9624 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
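    /* The VMREAD destination can be a register or memory; only decode the memory operand when it isn't a register. */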
9625 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9626 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9627
9628 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9630 {
9631 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9632
9633# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9634 /* Try for exit optimization. This is on the following instruction
9635 because it would be a waste of time to have to reinterpret the
9636 already decoded vmwrite instruction. */
9637 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9638 if (pExitRec)
9639 {
9640 /* Frequent access or probing. */
9641 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9642 AssertRCReturn(rc, rc);
9643
9644 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9645 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9646 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9647 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9648 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9649 }
9650# endif
9651 }
9652 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9653 {
9654 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9655 rcStrict = VINF_SUCCESS;
9656 }
9657 return rcStrict;
9658}
9659
9660
9661/**
9662 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9663 */
9664HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9665{
9666 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9667
9668 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9669 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9670 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9671 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9672 AssertRCReturn(rc, rc);
9673
9674 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9675
9676 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9677 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9678 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9679 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9680 {
9681 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9682 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9683 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9684 }
9685 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9686 return rcStrict;
9687}
9688
9689
9690/**
9691 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9692 */
9693HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9694{
9695 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9696
9697 /*
9698 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9699 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9700 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9701 */
9702 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9703 | HMVMX_READ_EXIT_INSTR_INFO
9704 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9705 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9706 | CPUMCTX_EXTRN_SREG_MASK
9707 | CPUMCTX_EXTRN_HWVIRT
9708 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9709 AssertRCReturn(rc, rc);
9710
9711 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9712
9713 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9714 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9715 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9716
9717 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9718 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9719 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9720 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9721 {
9722 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9723 rcStrict = VINF_SUCCESS;
9724 }
9725 return rcStrict;
9726}
9727
9728
9729/**
9730 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9731 */
9732HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9733{
9734 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9735
9736 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9737 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9738 | CPUMCTX_EXTRN_HWVIRT
9739 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9740 AssertRCReturn(rc, rc);
9741
9742 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9743
9744 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9745 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9746 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9747 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9748 {
9749 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9750 rcStrict = VINF_SUCCESS;
9751 }
9752 return rcStrict;
9753}
9754
9755
9756/**
9757 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9758 */
9759HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9760{
9761 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9762
9763 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9764 | HMVMX_READ_EXIT_INSTR_INFO
9765 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9766 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9767 | CPUMCTX_EXTRN_SREG_MASK
9768 | CPUMCTX_EXTRN_HWVIRT
9769 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9770 AssertRCReturn(rc, rc);
9771
9772 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9773
9774 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9775 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9776
9777 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9778 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9780 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9781 {
9782 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9783 rcStrict = VINF_SUCCESS;
9784 }
9785 return rcStrict;
9786}
9787
9788
9789/**
9790 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9791 */
9792HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9793{
9794 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9795
9796 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9797 | HMVMX_READ_EXIT_INSTR_INFO
9798 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9799 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9800 | CPUMCTX_EXTRN_SREG_MASK
9801 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9802 AssertRCReturn(rc, rc);
9803
9804 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9805
9806 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9807 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9808
9809 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9810 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9811 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9812 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9813 {
9814 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9815 rcStrict = VINF_SUCCESS;
9816 }
9817 return rcStrict;
9818}
9819
9820
9821# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9822/**
9823 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9824 */
9825HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9826{
9827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9828
9829 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9830 | HMVMX_READ_EXIT_INSTR_INFO
9831 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9832 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9833 | CPUMCTX_EXTRN_SREG_MASK
9834 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9835 AssertRCReturn(rc, rc);
9836
9837 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9838
9839 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9840 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9841
9842 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9843 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9845 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9846 {
9847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9848 rcStrict = VINF_SUCCESS;
9849 }
9850 return rcStrict;
9851}
9852# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9853#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9854/** @} */
9855
9856
9857#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9858/** @name Nested-guest VM-exit handlers.
9859 * @{
9860 */
9861/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9862/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9863/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9864
9865/**
9866 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9867 * Conditional VM-exit.
9868 */
9869HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9870{
9871 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9872
9873 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9874
9875 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9876 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9877 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9878
9879 switch (uExitIntType)
9880 {
9881# ifndef IN_NEM_DARWIN
9882 /*
9883 * Physical NMIs:
9884         * We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
9885 */
9886 case VMX_EXIT_INT_INFO_TYPE_NMI:
9887 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9888# endif
9889
9890 /*
9891 * Hardware exceptions,
9892 * Software exceptions,
9893 * Privileged software exceptions:
9894 * Figure out if the exception must be delivered to the guest or the nested-guest.
9895 */
9896 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9897 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9898 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9899 {
9900 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9901 | HMVMX_READ_EXIT_INSTR_LEN
9902 | HMVMX_READ_IDT_VECTORING_INFO
9903 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9904
9905 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
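            /* The exception causes a nested VM-exit only if the nested-guest VMCS intercepts it,
               i.e. the corresponding bit is set in its exception bitmap; for #PF the decision is
               additionally refined by the page-fault error-code mask/match controls, which is why
               the error code is passed along here. */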
9906 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9907 {
9908 /* Exit qualification is required for debug and page-fault exceptions. */
9909 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9910
9911 /*
9912 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9913 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9914 * length. However, if delivery of a software interrupt, software exception or privileged
9915 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9916 */
9917 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9918 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9919 pVmxTransient->uExitIntErrorCode,
9920 pVmxTransient->uIdtVectoringInfo,
9921 pVmxTransient->uIdtVectoringErrorCode);
9922#ifdef DEBUG_ramshankar
9923 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9924 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9925 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9926 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9927 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9928 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9929#endif
9930 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9931 }
9932
9933 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9934 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9935 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9936 }
9937
9938 /*
9939 * Software interrupts:
9940 * VM-exits cannot be caused by software interrupts.
9941 *
9942 * External interrupts:
9943 * This should only happen when "acknowledge external interrupts on VM-exit"
9944 * control is set. However, we never set this when executing a guest or
9945 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9946 * the guest.
9947 */
9948 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9949 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9950 default:
9951 {
9952 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9953 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9954 }
9955 }
9956}
9957
9958
9959/**
9960 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9961 * Unconditional VM-exit.
9962 */
9963HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9964{
9965 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9966 return IEMExecVmxVmexitTripleFault(pVCpu);
9967}
9968
9969
9970/**
9971 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9972 */
9973HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9974{
9975 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9976
9977 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9978 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9979 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9980}
9981
9982
9983/**
9984 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9985 */
9986HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9987{
9988 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9989
9990 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9991 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9992    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
9993}
9994
9995
9996/**
9997 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9998 * Unconditional VM-exit.
9999 */
10000HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10001{
10002 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10003
10004 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10005 | HMVMX_READ_EXIT_INSTR_LEN
10006 | HMVMX_READ_IDT_VECTORING_INFO
10007 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10008
10009 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10010 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10011 pVmxTransient->uIdtVectoringErrorCode);
10012 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10013}
10014
10015
10016/**
10017 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10018 */
10019HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10020{
10021 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10022
10023 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10024 {
10025 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10026 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10027 }
10028 return vmxHCExitHlt(pVCpu, pVmxTransient);
10029}
10030
10031
10032/**
10033 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10034 */
10035HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10036{
10037 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10038
10039 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10040 {
10041 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10042 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10043 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10044 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10045 }
10046 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10047}
10048
10049
10050/**
10051 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10052 */
10053HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10054{
10055 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10056
10057 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10058 {
10059 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10060 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10061 }
10062 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10063}
10064
10065
10066/**
10067 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10068 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10069 */
10070HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10071{
10072 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10073
10074 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10075 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10076
10077 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10078
10079 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10080 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10081 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10082
10083 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
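    /* Outside 64-bit mode only the lower 32 bits of the register operand are used as the
       VMCS field encoding, so mask off the upper half. */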
10084 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10085 u64VmcsField &= UINT64_C(0xffffffff);
10086
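    /* When the nested hypervisor uses VMCS shadowing, its VMREAD/VMWRITE bitmaps decide whether
       accessing this particular field must cause a VM-exit; without VMCS shadowing these
       instructions cause VM-exits unconditionally. */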
10087 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10088 {
10089 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10090 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10091 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10092 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10093 }
10094
10095 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10096 return vmxHCExitVmread(pVCpu, pVmxTransient);
10097 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10098}
10099
10100
10101/**
10102 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10103 */
10104HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10105{
10106 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10107
10108 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10109 {
10110 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10111 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10112 }
10113
10114 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10115}
10116
10117
10118/**
10119 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10120 * Conditional VM-exit.
10121 */
10122HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10123{
10124 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10125
10126 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10127 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10128
10129 VBOXSTRICTRC rcStrict;
10130 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10131 switch (uAccessType)
10132 {
10133 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10134 {
10135 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10136 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10137 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10138 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10139
10140 bool fIntercept;
10141 switch (iCrReg)
10142 {
10143 case 0:
10144 case 4:
10145 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10146 break;
10147
10148 case 3:
10149 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10150 break;
10151
10152 case 8:
10153 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10154 break;
10155
10156 default:
10157 fIntercept = false;
10158 break;
10159 }
10160 if (fIntercept)
10161 {
10162 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10163 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10164 }
10165 else
10166 {
10167 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10168 AssertRCReturn(rc, rc);
10169 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10170 }
10171 break;
10172 }
10173
10174 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10175 {
10176 /*
10177 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10178 * CR2 reads do not cause a VM-exit.
10179 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10180 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10181 */
10182 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10183 if ( iCrReg == 3
10184 || iCrReg == 8)
10185 {
10186 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10187 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10188 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10189 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10190 {
10191 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10192 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10193 }
10194 else
10195 {
10196 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10197 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10198 }
10199 }
10200 else
10201 {
10202 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10203 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10204 }
10205 break;
10206 }
10207
10208 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10209 {
10210 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10211 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10212 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
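            /* Per the VMX spec, CLTS causes a VM-exit only if both the CR0 guest/host mask and
               the CR0 read shadow have CR0.TS set. */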
10213 if ( (uGstHostMask & X86_CR0_TS)
10214 && (uReadShadow & X86_CR0_TS))
10215 {
10216 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10217 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10218 }
10219 else
10220 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10221 break;
10222 }
10223
10224 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10225 {
10226 RTGCPTR GCPtrEffDst;
10227 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10228 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10229 if (fMemOperand)
10230 {
10231 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10232 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10233 }
10234 else
10235 GCPtrEffDst = NIL_RTGCPTR;
10236
10237 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10238 {
10239 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10240 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10241 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10242 }
10243 else
10244 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10245 break;
10246 }
10247
10248 default:
10249 {
10250 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10251 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10252 }
10253 }
10254
10255 if (rcStrict == VINF_IEM_RAISED_XCPT)
10256 {
10257 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10258 rcStrict = VINF_SUCCESS;
10259 }
10260 return rcStrict;
10261}
10262
10263
10264/**
10265 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10266 * Conditional VM-exit.
10267 */
10268HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10269{
10270 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10271
10272 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10273 {
10274 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10275 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10276 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10277 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10278 }
10279 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10280}
10281
10282
10283/**
10284 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10285 * Conditional VM-exit.
10286 */
10287HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10288{
10289 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10290
10291 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10292
10293 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10294 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
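    /* The I/O size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes; the value 2 is not
       defined, hence the assertion and the zero entry in the size table below. */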
10295 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10296
10297 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10298 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10299 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10300 {
10301 /*
10302 * IN/OUT instruction:
10303 * - Provides VM-exit instruction length.
10304 *
10305 * INS/OUTS instruction:
10306 * - Provides VM-exit instruction length.
10307 * - Provides Guest-linear address.
10308 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10309 */
10310 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10311 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10312
10313         /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10314 pVmxTransient->ExitInstrInfo.u = 0;
10315 pVmxTransient->uGuestLinearAddr = 0;
10316
10317 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10318 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10319 if (fIOString)
10320 {
10321 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10322 if (fVmxInsOutsInfo)
10323 {
10324 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10325 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10326 }
10327 }
10328
10329 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10330 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10331 }
10332 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10333}
10334
10335
10336/**
10337 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10338 */
10339HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10340{
10341 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10342
10343 uint32_t fMsrpm;
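    /* Without MSR bitmaps every RDMSR unconditionally causes a VM-exit to the nested hypervisor;
       with them, the nested-guest's MSR bitmap decides it for the MSR in ECX. */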
10344 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10345 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10346 else
10347 fMsrpm = VMXMSRPM_EXIT_RD;
10348
10349 if (fMsrpm & VMXMSRPM_EXIT_RD)
10350 {
10351 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10352 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10353 }
10354 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10355}
10356
10357
10358/**
10359 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10360 */
10361HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10362{
10363 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10364
10365 uint32_t fMsrpm;
10366 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10367 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10368 else
10369 fMsrpm = VMXMSRPM_EXIT_WR;
10370
10371 if (fMsrpm & VMXMSRPM_EXIT_WR)
10372 {
10373 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10374 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10375 }
10376 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10377}
10378
10379
10380/**
10381 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10382 */
10383HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10384{
10385 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10386
10387 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10388 {
10389 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10390 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10391 }
10392 return vmxHCExitMwait(pVCpu, pVmxTransient);
10393}
10394
10395
10396/**
10397 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10398 * VM-exit.
10399 */
10400HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10401{
10402 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10403
10404 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
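    /* MTF is a trap-like VM-exit, so the guest pending-debug-exceptions field is fetched here
       and handed to IEM for it to take into account when emulating the nested VM-exit. */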
10405 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10406 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10407 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10408}
10409
10410
10411/**
10412 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10413 */
10414HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10415{
10416 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10417
10418 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10419 {
10420 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10421 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10422 }
10423 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10424}
10425
10426
10427/**
10428 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10429 */
10430HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10431{
10432 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10433
10434 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10435 * PAUSE when executing a nested-guest? If it does not, we would not need
10436 * to check for the intercepts here. Just call VM-exit... */
10437
10438 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10439 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10440 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10441 {
10442 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10443 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10444 }
10445 return vmxHCExitPause(pVCpu, pVmxTransient);
10446}
10447
10448
10449/**
10450 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10451 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10452 */
10453HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10454{
10455 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10456
10457 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10458 {
10459 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10460 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10461 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10462 }
10463 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10464}
10465
10466
10467/**
10468 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10469 * VM-exit.
10470 */
10471HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10472{
10473 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10474
10475 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10476 | HMVMX_READ_EXIT_INSTR_LEN
10477 | HMVMX_READ_IDT_VECTORING_INFO
10478 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10479
10480 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10481
10482 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10483 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10484
10485 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10486 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10487 pVmxTransient->uIdtVectoringErrorCode);
10488 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10489}
10490
10491
10492/**
10493 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10494 * Conditional VM-exit.
10495 */
10496HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10497{
10498 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10499
10500 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10501 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10502 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10503}
10504
10505
10506/**
10507 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10508 * Conditional VM-exit.
10509 */
10510HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10511{
10512 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10513
10514 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10515 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10516 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10517}
10518
10519
10520/**
10521 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10522 */
10523HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10524{
10525 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10526
10527 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10528 {
10529 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10530 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10531 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10532 }
10533 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10534}
10535
10536
10537/**
10538 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10539 */
10540HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10541{
10542 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10543
10544 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10545 {
10546 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10547 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10548 }
10549 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10550}
10551
10552
10553/**
10554 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10555 */
10556HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10557{
10558 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10559
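    /* INVPCID is intercepted via the "INVLPG exiting" primary control together with the
       "enable INVPCID" secondary control, hence the checks below. */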
10560 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10561 {
10562 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10563 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10564 | HMVMX_READ_EXIT_INSTR_INFO
10565 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10566 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10567 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10568 }
10569 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10570}
10571
10572
10573/**
10574 * Nested-guest VM-exit handler for invalid-guest state
10575 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10576 */
10577HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10578{
10579 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10580
10581 /*
10582 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10583 * So if it does happen, it indicates a bug possibly in the hardware-assisted VMX code.
10584      * Handle it as if the outer guest itself were in an invalid guest state.
10585 *
10586 * When the fast path is implemented, this should be changed to cause the corresponding
10587 * nested-guest VM-exit.
10588 */
10589 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10590}
10591
10592
10593/**
10594 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10595 * and only provide the instruction length.
10596 *
10597 * Unconditional VM-exit.
10598 */
10599HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10600{
10601 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10602
10603#ifdef VBOX_STRICT
10604 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10605 switch (pVmxTransient->uExitReason)
10606 {
10607 case VMX_EXIT_ENCLS:
10608 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10609 break;
10610
10611 case VMX_EXIT_VMFUNC:
10612 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10613 break;
10614 }
10615#endif
10616
10617 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10618 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10619}
10620
10621
10622/**
10623 * Nested-guest VM-exit handler for instructions that provide instruction length as
10624 * well as more information.
10625 *
10626 * Unconditional VM-exit.
10627 */
10628HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10629{
10630 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10631
10632# ifdef VBOX_STRICT
10633 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10634 switch (pVmxTransient->uExitReason)
10635 {
10636 case VMX_EXIT_GDTR_IDTR_ACCESS:
10637 case VMX_EXIT_LDTR_TR_ACCESS:
10638 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10639 break;
10640
10641 case VMX_EXIT_RDRAND:
10642 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10643 break;
10644
10645 case VMX_EXIT_RDSEED:
10646 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10647 break;
10648
10649 case VMX_EXIT_XSAVES:
10650 case VMX_EXIT_XRSTORS:
10651 /** @todo NSTVMX: Verify XSS-bitmap. */
10652 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10653 break;
10654
10655 case VMX_EXIT_UMWAIT:
10656 case VMX_EXIT_TPAUSE:
10657 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10658 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10659 break;
10660
10661 case VMX_EXIT_LOADIWKEY:
10662 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10663 break;
10664 }
10665# endif
10666
10667 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10668 | HMVMX_READ_EXIT_INSTR_LEN
10669 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10670 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10671 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10672}
10673
10674# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10675
10676/**
10677 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10678 * Conditional VM-exit.
10679 */
10680HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10681{
10682 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10683 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10684
10685 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10686 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10687 {
10688 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10689 | HMVMX_READ_EXIT_INSTR_LEN
10690 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10691 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10692 | HMVMX_READ_IDT_VECTORING_INFO
10693 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10694 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10695 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10696 AssertRCReturn(rc, rc);
10697
10698 /*
10699          * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10700          * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10701          * it becomes that VMM's problem to deal with and we'll clear the recovered event.
10702 */
10703 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10704 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10705 { /*likely*/ }
10706 else
10707 {
10708 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10709 return rcStrict;
10710 }
10711 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10712
10713 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10714 uint64_t const uExitQual = pVmxTransient->uExitQual;
10715
10716 RTGCPTR GCPtrNestedFault;
10717 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10718 if (fIsLinearAddrValid)
10719 {
10720 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10721 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10722 }
10723 else
10724 GCPtrNestedFault = 0;
10725
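        /* Translate the EPT-violation exit qualification into a page-fault style error code
           (instruction fetch, write access, present) for the PGM nested-paging handler. */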
10726 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10727 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10728 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10729 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10730 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10731
10732 PGMPTWALK Walk;
10733 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10734 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10735 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10736 &Walk);
10737 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10738 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10739 if (RT_SUCCESS(rcStrict))
10740 return rcStrict;
10741
10742 if (fClearEventOnForward)
10743 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10744
10745 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10746 pVmxTransient->uIdtVectoringErrorCode);
10747 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10748 {
10749 VMXVEXITINFO const ExitInfo
10750 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10751 pVmxTransient->uExitQual,
10752 pVmxTransient->cbExitInstr,
10753 pVmxTransient->uGuestLinearAddr,
10754 pVmxTransient->uGuestPhysicalAddr);
10755 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10756 }
10757
10758 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10759 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10760 }
10761
10762 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10763}
10764
10765
10766/**
10767 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10768 * Conditional VM-exit.
10769 */
10770HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10771{
10772 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10773 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10774
10775 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10776 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10777 {
10778 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10779 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10780 AssertRCReturn(rc, rc);
10781
10782 PGMPTWALK Walk;
10783 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10784 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10785 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10786 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10787 0 /* GCPtrNestedFault */, &Walk);
10788 if (RT_SUCCESS(rcStrict))
10789 {
10790 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10791 return rcStrict;
10792 }
10793
10794 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10795 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10796 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10797
10798 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10799 pVmxTransient->uIdtVectoringErrorCode);
10800 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10801 }
10802
10803 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10804}
10805
10806# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10807
10808/** @} */
10809#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10810
10811
10812/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10813 * probes.
10814 *
10815  * The following few functions and associated structure contain the bloat
10816 * necessary for providing detailed debug events and dtrace probes as well as
10817 * reliable host side single stepping. This works on the principle of
10818 * "subclassing" the normal execution loop and workers. We replace the loop
10819 * method completely and override selected helpers to add necessary adjustments
10820 * to their core operation.
10821 *
10822 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10823 * any performance for debug and analysis features.
10824 *
10825 * @{
10826 */
10827
10828/**
10829  * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
10830 * the debug run loop.
10831 */
10832typedef struct VMXRUNDBGSTATE
10833{
10834 /** The RIP we started executing at. This is for detecting that we stepped. */
10835 uint64_t uRipStart;
10836 /** The CS we started executing with. */
10837 uint16_t uCsStart;
10838
10839 /** Whether we've actually modified the 1st execution control field. */
10840 bool fModifiedProcCtls : 1;
10841 /** Whether we've actually modified the 2nd execution control field. */
10842 bool fModifiedProcCtls2 : 1;
10843 /** Whether we've actually modified the exception bitmap. */
10844 bool fModifiedXcptBitmap : 1;
10845
10846     /** We desire the CR0 guest/host mask to be cleared. */
10847 bool fClearCr0Mask : 1;
10848     /** We desire the CR4 guest/host mask to be cleared. */
10849 bool fClearCr4Mask : 1;
10850 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10851 uint32_t fCpe1Extra;
10852 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10853 uint32_t fCpe1Unwanted;
10854 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10855 uint32_t fCpe2Extra;
10856 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10857 uint32_t bmXcptExtra;
10858 /** The sequence number of the Dtrace provider settings the state was
10859 * configured against. */
10860 uint32_t uDtraceSettingsSeqNo;
10861 /** VM-exits to check (one bit per VM-exit). */
10862 uint32_t bmExitsToCheck[3];
10863
10864 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10865 uint32_t fProcCtlsInitial;
10866 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10867 uint32_t fProcCtls2Initial;
10868 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10869 uint32_t bmXcptInitial;
10870} VMXRUNDBGSTATE;
10871AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10872typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10873
10874
10875/**
10876 * Initializes the VMXRUNDBGSTATE structure.
10877 *
10878 * @param pVCpu The cross context virtual CPU structure of the
10879 * calling EMT.
10880 * @param pVmxTransient The VMX-transient structure.
10881 * @param pDbgState The debug state to initialize.
10882 */
10883static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10884{
10885 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10886 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10887
10888 pDbgState->fModifiedProcCtls = false;
10889 pDbgState->fModifiedProcCtls2 = false;
10890 pDbgState->fModifiedXcptBitmap = false;
10891 pDbgState->fClearCr0Mask = false;
10892 pDbgState->fClearCr4Mask = false;
10893 pDbgState->fCpe1Extra = 0;
10894 pDbgState->fCpe1Unwanted = 0;
10895 pDbgState->fCpe2Extra = 0;
10896 pDbgState->bmXcptExtra = 0;
10897 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10898 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10899 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10900}
10901
10902
10903/**
10904  * Updates the VMCS fields with changes requested by @a pDbgState.
10905 *
10906  * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10907 * immediately before executing guest code, i.e. when interrupts are disabled.
10908 * We don't check status codes here as we cannot easily assert or return in the
10909 * latter case.
10910 *
10911 * @param pVCpu The cross context virtual CPU structure.
10912 * @param pVmxTransient The VMX-transient structure.
10913 * @param pDbgState The debug state.
10914 */
10915static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10916{
10917 /*
10918 * Ensure desired flags in VMCS control fields are set.
10919 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10920 *
10921 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10922 * there should be no stale data in pCtx at this point.
10923 */
10924 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10925 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10926 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10927 {
10928 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10929 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10930 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10931 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10932 pDbgState->fModifiedProcCtls = true;
10933 }
10934
10935 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10936 {
10937 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10938 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10939 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10940 pDbgState->fModifiedProcCtls2 = true;
10941 }
10942
10943 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10944 {
10945 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10946 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10947 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10948 pDbgState->fModifiedXcptBitmap = true;
10949 }
10950
10951 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10952 {
10953 pVmcsInfo->u64Cr0Mask = 0;
10954 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10955 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10956 }
10957
10958 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10959 {
10960 pVmcsInfo->u64Cr4Mask = 0;
10961 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10962 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10963 }
10964
10965 NOREF(pVCpu);
10966}
10967
10968
10969/**
10970  * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10971 * re-entry next time around.
10972 *
10973 * @returns Strict VBox status code (i.e. informational status codes too).
10974 * @param pVCpu The cross context virtual CPU structure.
10975 * @param pVmxTransient The VMX-transient structure.
10976 * @param pDbgState The debug state.
10977 * @param rcStrict The return code from executing the guest using single
10978 * stepping.
10979 */
10980static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10981 VBOXSTRICTRC rcStrict)
10982{
10983 /*
10984      * Restore VM-execution control settings as we may not re-enter this function the
10985 * next time around.
10986 */
10987 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10988
10989     /* We reload the initial value and trigger whatever recalculations we can the
10990 next time around. From the looks of things, that's all that's required atm. */
10991 if (pDbgState->fModifiedProcCtls)
10992 {
10993 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10994 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10995 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10996 AssertRC(rc2);
10997 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10998 }
10999
11000 /* We're currently the only ones messing with this one, so just restore the
11001 cached value and reload the field. */
11002 if ( pDbgState->fModifiedProcCtls2
11003 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11004 {
11005 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11006 AssertRC(rc2);
11007 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11008 }
11009
11010 /* If we've modified the exception bitmap, we restore it and trigger
11011 reloading and partial recalculation the next time around. */
11012 if (pDbgState->fModifiedXcptBitmap)
11013 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11014
11015 return rcStrict;
11016}
11017
11018
11019/**
11020 * Configures VM-exit controls for current DBGF and DTrace settings.
11021 *
11022 * This updates @a pDbgState and the VMCS execution control fields to reflect
11023 * the necessary VM-exits demanded by DBGF and DTrace.
11024 *
11025 * @param pVCpu The cross context virtual CPU structure.
11026 * @param pVmxTransient The VMX-transient structure. May update
11027 * fUpdatedTscOffsettingAndPreemptTimer.
11028 * @param pDbgState The debug state.
11029 */
11030static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11031{
11032#ifndef IN_NEM_DARWIN
11033 /*
11034 * Take down the dtrace serial number so we can spot changes.
11035 */
11036 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11037 ASMCompilerBarrier();
11038#endif
11039
11040 /*
11041 * We'll rebuild most of the middle block of data members (holding the
11042 * current settings) as we go along here, so start by clearing it all.
11043 */
11044 pDbgState->bmXcptExtra = 0;
11045 pDbgState->fCpe1Extra = 0;
11046 pDbgState->fCpe1Unwanted = 0;
11047 pDbgState->fCpe2Extra = 0;
11048 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11049 pDbgState->bmExitsToCheck[i] = 0;
11050
11051 /*
11052 * Software interrupts (INT XXh) - no idea how to trigger these...
11053 */
11054 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11055 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11056 || VBOXVMM_INT_SOFTWARE_ENABLED())
11057 {
11058 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11059 }
11060
11061 /*
11062 * INT3 breakpoints - triggered by #BP exceptions.
11063 */
11064 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11065 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11066
11067 /*
11068 * Exception bitmap and XCPT events+probes.
11069 */
11070 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11071 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11072 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11073
11074 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11075 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11076 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11077 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11078 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11079 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11080 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11081 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11082 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11083 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11084 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11085 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11086 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11087 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11088 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11089 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11090 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11091 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11092
11093 if (pDbgState->bmXcptExtra)
11094 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11095
11096 /*
11097 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11098 *
11099 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11100 * So, when adding/changing/removing please don't forget to update it.
11101 *
11102 * Some of the macros are picking up local variables to save horizontal space,
11103 * (being able to see it in a table is the lesser evil here).
11104 */
11105#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11106 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11107 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11108#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11109 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11110 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11111 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11112 } else do { } while (0)
11113#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11114 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11115 { \
11116 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11117 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11118 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11119 } else do { } while (0)
11120#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11121 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11122 { \
11123 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11124 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11125 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11126 } else do { } while (0)
11127#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11128 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11129 { \
11130 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11131 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11132 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11133 } else do { } while (0)
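    /* Example: SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT) below
       forces the "HLT exiting" control on and marks VMX_EXIT_HLT for checking whenever either the
       DBGF INSTR_HALT event or the corresponding DTrace probe is enabled. */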
11134
11135 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11136 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11137 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11138 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11139 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11140
11141 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11142 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11143 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11144 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11145 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11146 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11147 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11148 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11149 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11150 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11151 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11152 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11153 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11154 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11155 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11156 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11157 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11158 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11159 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11160 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11161 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11162 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11163 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11164 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11165 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11166 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11167 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11168 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11169 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11170 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11171 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11172 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11173 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11174 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11175 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11176 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11177
11178 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11179 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11180 {
11181 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11182 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11183 AssertRC(rc);
11184
11185#if 0 /** @todo fix me */
11186 pDbgState->fClearCr0Mask = true;
11187 pDbgState->fClearCr4Mask = true;
11188#endif
11189 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11190 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11191 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11192 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11193 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11194 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11195 require clearing here and in the loop if we start using it. */
11196 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11197 }
11198 else
11199 {
11200 if (pDbgState->fClearCr0Mask)
11201 {
11202 pDbgState->fClearCr0Mask = false;
11203 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11204 }
11205 if (pDbgState->fClearCr4Mask)
11206 {
11207 pDbgState->fClearCr4Mask = false;
11208 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11209 }
11210 }
11211 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11212 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11213
11214 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11215 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11216 {
11217 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11218 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11219 }
11220 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11221 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11222
11223 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11224 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11225 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11226 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11227 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11228 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11229 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11230 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11231#if 0 /** @todo too slow, fix handler. */
11232 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11233#endif
11234 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11235
11236 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11237 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11238 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11239 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11240 {
11241 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11242 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11243 }
11244 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11245 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11246 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11247 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11248
11249 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11250 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11251 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11252 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11253 {
11254 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11255 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11256 }
11257 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11258 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11259 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11260 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11261
11262 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11263 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11264 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11265 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11266 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11267 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11268 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11269 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11270 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11271 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11272 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11273 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11274 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11275 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11276 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11278 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11280 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11281    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11282 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11283 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11284
11285#undef IS_EITHER_ENABLED
11286#undef SET_ONLY_XBM_IF_EITHER_EN
11287#undef SET_CPE1_XBM_IF_EITHER_EN
11288#undef SET_CPEU_XBM_IF_EITHER_EN
11289#undef SET_CPE2_XBM_IF_EITHER_EN
11290
11291 /*
11292 * Sanitize the control stuff.
11293 */
11294 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11295 if (pDbgState->fCpe2Extra)
11296 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11297 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11298 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
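    /* Note! In the VMX capability MSRs a control bit may only be set if its allowed-1
             bit is 1 and must be set if its allowed-0 bit is 1, so the masking above
             drops extra controls the CPU cannot enable and prevents us from trying to
             clear controls the CPU requires to be set. */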
11299#ifndef IN_NEM_DARWIN
11300 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11301 {
11302 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11303 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11304 }
11305#else
11306 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11307 {
11308 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11309 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11310 }
11311#endif
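    /* Note! Clearing fUpdatedTscOffsettingAndPreemptTimer above should force the TSC
             offsetting and preemption timer setup to be redone before the next VM-entry,
             so the flipped RDTSC-exit preference actually takes effect. */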
11312
11313 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11314 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11315 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11316 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11317}
11318
11319
11320/**
11321 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11322 * appropriate.
11323 *
11324 * The caller has already checked the VM-exit against the
11325 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
11326 * so we don't have to do either of those here.
11327 *
11328 * @returns Strict VBox status code (i.e. informational status codes too).
11329 * @param pVCpu The cross context virtual CPU structure.
11330 * @param pVmxTransient The VMX-transient structure.
11331 * @param uExitReason The VM-exit reason.
11332 *
11333 * @remarks The name of this function is displayed by dtrace, so keep it short
11334 *          and to the point. No longer than 33 chars, please.
11335 */
11336static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11337{
11338 /*
11339 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11340 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11341 *
11342 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11343     *       does. Additions/changes/removals must be made in both places. Same ordering, please.
11344 *
11345 * Added/removed events must also be reflected in the next section
11346 * where we dispatch dtrace events.
11347 */
11348 bool fDtrace1 = false;
11349 bool fDtrace2 = false;
11350 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11351 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11352 uint32_t uEventArg = 0;
11353#define SET_EXIT(a_EventSubName) \
11354 do { \
11355 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11356 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11357 } while (0)
11358#define SET_BOTH(a_EventSubName) \
11359 do { \
11360 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11361 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11362 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11363 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11364 } while (0)
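    /* For illustration, SET_BOTH(CPUID) expands (ignoring the do/while wrapper) to:
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       i.e. it records both the instruction-level and the exit-level DBGF event type
       and queries whether the corresponding dtrace probes are enabled. */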
11365 switch (uExitReason)
11366 {
11367 case VMX_EXIT_MTF:
11368 return vmxHCExitMtf(pVCpu, pVmxTransient);
11369
11370 case VMX_EXIT_XCPT_OR_NMI:
11371 {
11372 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11373 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11374 {
11375 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11376 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11377 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11378 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11379 {
11380 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11381 {
11382 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11383 uEventArg = pVmxTransient->uExitIntErrorCode;
11384 }
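                    /* The DBGFEVENT_XCPT_* values are assumed to be contiguous and in
                       vector order starting at DBGFEVENT_XCPT_FIRST; the range check
                       above and the addition below rely on that layout. */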
11385 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11386 switch (enmEvent1)
11387 {
11388 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11389 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11390 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11391 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11392 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11393 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11394 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11395 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11396 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11397 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11398 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11399 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11400 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11401 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11402 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11403 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11404 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11405 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11406 default: break;
11407 }
11408 }
11409 else
11410 AssertFailed();
11411 break;
11412
11413 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11414 uEventArg = idxVector;
11415 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11416 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11417 break;
11418 }
11419 break;
11420 }
11421
11422 case VMX_EXIT_TRIPLE_FAULT:
11423 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11424 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11425 break;
11426 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11427 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11428 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11429 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11430 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11431
11432 /* Instruction specific VM-exits: */
11433 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11434 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11435 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11436 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11437 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11438 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11439 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11440 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11441 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11442 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11443 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11444 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11445 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11446 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11447 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11448 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11449 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11450 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11451 case VMX_EXIT_MOV_CRX:
11452 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11453 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11454 SET_BOTH(CRX_READ);
11455 else
11456 SET_BOTH(CRX_WRITE);
11457 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11458 break;
11459 case VMX_EXIT_MOV_DRX:
11460 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11461 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11462 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11463 SET_BOTH(DRX_READ);
11464 else
11465 SET_BOTH(DRX_WRITE);
11466 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11467 break;
11468 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11469 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11470 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11471 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11472 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11473 case VMX_EXIT_GDTR_IDTR_ACCESS:
11474 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11475 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11476 {
11477 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11478 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11479 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11480 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11481 }
11482 break;
11483
11484 case VMX_EXIT_LDTR_TR_ACCESS:
11485 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11486 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11487 {
11488 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11489 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11490 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11491 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11492 }
11493 break;
11494
11495 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11496 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11497 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11498 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11499 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11500 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11501 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11502 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11503 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11504 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11505 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11506
11507 /* Events that aren't relevant at this point. */
11508 case VMX_EXIT_EXT_INT:
11509 case VMX_EXIT_INT_WINDOW:
11510 case VMX_EXIT_NMI_WINDOW:
11511 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11512 case VMX_EXIT_PREEMPT_TIMER:
11513 case VMX_EXIT_IO_INSTR:
11514 break;
11515
11516 /* Errors and unexpected events. */
11517 case VMX_EXIT_INIT_SIGNAL:
11518 case VMX_EXIT_SIPI:
11519 case VMX_EXIT_IO_SMI:
11520 case VMX_EXIT_SMI:
11521 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11522 case VMX_EXIT_ERR_MSR_LOAD:
11523 case VMX_EXIT_ERR_MACHINE_CHECK:
11524 case VMX_EXIT_PML_FULL:
11525 case VMX_EXIT_VIRTUALIZED_EOI:
11526 break;
11527
11528 default:
11529 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11530 break;
11531 }
11532#undef SET_BOTH
11533#undef SET_EXIT
11534
11535 /*
11536     * Dtrace tracepoints go first. We do them all here at once so we don't
11537     * have to duplicate the guest-state saving code a few dozen times.
11538     * The downside is that we've got to repeat the switch, though this time
11539     * we use enmEvent since the probes are a subset of what DBGF does.
11540 */
11541 if (fDtrace1 || fDtrace2)
11542 {
11543 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11544 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11545 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11546 switch (enmEvent1)
11547 {
11548 /** @todo consider which extra parameters would be helpful for each probe. */
11549 case DBGFEVENT_END: break;
11550 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11551 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11552 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11553 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11554 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11555 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11556 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11557 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11558 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11559 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11560 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11561 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11562 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11563 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11564 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11565 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11566 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11567 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11568 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11569 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11570 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11571 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11572 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11573 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11574 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11575 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11576 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11577 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11578 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11579 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11580 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11581 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11582 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11583 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11584 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11585 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11586 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11587 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11588 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11589 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11590 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11591 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11592 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11593 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11594 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11595 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11596 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11597 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11598 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11599 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11600 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11601 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11602 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11603 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11604 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11605 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11606 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11607 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11608 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11609 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11610 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11611 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11612 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11613 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11614 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11615 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11616 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11617 }
11618 switch (enmEvent2)
11619 {
11620 /** @todo consider which extra parameters would be helpful for each probe. */
11621 case DBGFEVENT_END: break;
11622 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11623 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11624 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11625 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11626 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11627 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11628 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11629 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11630 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11631 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11632 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11633 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11634 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11635 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11636 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11637 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11638 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11639 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11640 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11641 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11642 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11643 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11644 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11645 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11646 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11647 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11648 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11649 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11650 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11651 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11652 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11653 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11654 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11655 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11656 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11657 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11658 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11659 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11660 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11661 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11662 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11663 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11664 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11665 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11666 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11667 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11668 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11669 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11670 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11671 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11672 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11673 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11674 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11675 }
11676 }
11677
11678 /*
11679     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11680 * the DBGF call will do a full check).
11681 *
11682 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11683     * Note! If we have two events, we prioritize the first, i.e. the instruction
11684 * one, in order to avoid event nesting.
11685 */
11686 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11687 if ( enmEvent1 != DBGFEVENT_END
11688 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11689 {
11690 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11691 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11692 if (rcStrict != VINF_SUCCESS)
11693 return rcStrict;
11694 }
11695 else if ( enmEvent2 != DBGFEVENT_END
11696 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11697 {
11698 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11699 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11700 if (rcStrict != VINF_SUCCESS)
11701 return rcStrict;
11702 }
11703
11704 return VINF_SUCCESS;
11705}
11706
11707
11708/**
11709 * Single-stepping VM-exit filtering.
11710 *
11711 * This preprocesses the VM-exits, deciding whether we've gotten far
11712 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11713 * handling is performed.
11714 *
11715 * @returns Strict VBox status code (i.e. informational status codes too).
11716 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11717 * @param pVmxTransient The VMX-transient structure.
11718 * @param pDbgState The debug state.
11719 */
11720DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11721{
11722 /*
11723 * Expensive (saves context) generic dtrace VM-exit probe.
11724 */
11725 uint32_t const uExitReason = pVmxTransient->uExitReason;
11726 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11727 { /* more likely */ }
11728 else
11729 {
11730 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11731 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11732 AssertRC(rc);
11733 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11734 }
11735
11736#ifndef IN_NEM_DARWIN
11737 /*
11738 * Check for host NMI, just to get that out of the way.
11739 */
11740 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11741 { /* normally likely */ }
11742 else
11743 {
11744 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11745 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11746 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11747 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11748 }
11749#endif
11750
11751 /*
11752 * Check for single stepping event if we're stepping.
11753 */
11754 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11755 {
11756 switch (uExitReason)
11757 {
11758 case VMX_EXIT_MTF:
11759 return vmxHCExitMtf(pVCpu, pVmxTransient);
11760
11761 /* Various events: */
11762 case VMX_EXIT_XCPT_OR_NMI:
11763 case VMX_EXIT_EXT_INT:
11764 case VMX_EXIT_TRIPLE_FAULT:
11765 case VMX_EXIT_INT_WINDOW:
11766 case VMX_EXIT_NMI_WINDOW:
11767 case VMX_EXIT_TASK_SWITCH:
11768 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11769 case VMX_EXIT_APIC_ACCESS:
11770 case VMX_EXIT_EPT_VIOLATION:
11771 case VMX_EXIT_EPT_MISCONFIG:
11772 case VMX_EXIT_PREEMPT_TIMER:
11773
11774 /* Instruction specific VM-exits: */
11775 case VMX_EXIT_CPUID:
11776 case VMX_EXIT_GETSEC:
11777 case VMX_EXIT_HLT:
11778 case VMX_EXIT_INVD:
11779 case VMX_EXIT_INVLPG:
11780 case VMX_EXIT_RDPMC:
11781 case VMX_EXIT_RDTSC:
11782 case VMX_EXIT_RSM:
11783 case VMX_EXIT_VMCALL:
11784 case VMX_EXIT_VMCLEAR:
11785 case VMX_EXIT_VMLAUNCH:
11786 case VMX_EXIT_VMPTRLD:
11787 case VMX_EXIT_VMPTRST:
11788 case VMX_EXIT_VMREAD:
11789 case VMX_EXIT_VMRESUME:
11790 case VMX_EXIT_VMWRITE:
11791 case VMX_EXIT_VMXOFF:
11792 case VMX_EXIT_VMXON:
11793 case VMX_EXIT_MOV_CRX:
11794 case VMX_EXIT_MOV_DRX:
11795 case VMX_EXIT_IO_INSTR:
11796 case VMX_EXIT_RDMSR:
11797 case VMX_EXIT_WRMSR:
11798 case VMX_EXIT_MWAIT:
11799 case VMX_EXIT_MONITOR:
11800 case VMX_EXIT_PAUSE:
11801 case VMX_EXIT_GDTR_IDTR_ACCESS:
11802 case VMX_EXIT_LDTR_TR_ACCESS:
11803 case VMX_EXIT_INVEPT:
11804 case VMX_EXIT_RDTSCP:
11805 case VMX_EXIT_INVVPID:
11806 case VMX_EXIT_WBINVD:
11807 case VMX_EXIT_XSETBV:
11808 case VMX_EXIT_RDRAND:
11809 case VMX_EXIT_INVPCID:
11810 case VMX_EXIT_VMFUNC:
11811 case VMX_EXIT_RDSEED:
11812 case VMX_EXIT_XSAVES:
11813 case VMX_EXIT_XRSTORS:
11814 {
11815 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11816 AssertRCReturn(rc, rc);
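                /* If RIP or CS differs from where the step started (uRipStart/uCsStart),
                   the guest has advanced past the original instruction and we can report
                   the step as completed. */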
11817 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11818 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11819 return VINF_EM_DBG_STEPPED;
11820 break;
11821 }
11822
11823 /* Errors and unexpected events: */
11824 case VMX_EXIT_INIT_SIGNAL:
11825 case VMX_EXIT_SIPI:
11826 case VMX_EXIT_IO_SMI:
11827 case VMX_EXIT_SMI:
11828 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11829 case VMX_EXIT_ERR_MSR_LOAD:
11830 case VMX_EXIT_ERR_MACHINE_CHECK:
11831 case VMX_EXIT_PML_FULL:
11832 case VMX_EXIT_VIRTUALIZED_EOI:
11833            case VMX_EXIT_APIC_WRITE:   /* Some talk about this being fault-like, so I guess we must process it? */
11834 break;
11835
11836 default:
11837 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11838 break;
11839 }
11840 }
11841
11842 /*
11843 * Check for debugger event breakpoints and dtrace probes.
11844 */
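    /* bmExitsToCheck is a bitmap indexed by VM-exit reason, filled in by the debug
       state preparation code above; the bounds check below merely guards against
       exit reasons beyond the bitmap's capacity. */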
11845 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11846 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11847 {
11848 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11849 if (rcStrict != VINF_SUCCESS)
11850 return rcStrict;
11851 }
11852
11853 /*
11854 * Normal processing.
11855 */
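    /* Dispatch to the regular per-exit handler: through the g_aVMExitHandlers table
       indexed by exit reason when the function table is in use, otherwise through the
       switch-based vmxHCHandleExit. */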
11856#ifdef HMVMX_USE_FUNCTION_TABLE
11857 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11858#else
11859 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11860#endif
11861}
11862
11863/** @} */