VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@99654

Last change on this file since 99654 was 99654, checked in by vboxsync, 20 months ago

VMM: Nested VMX: bugref:10318 Doxygen.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 526.6 KB
1/* $Id: VMXAllTemplate.cpp.h 99654 2023-05-08 07:47:39Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
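/* Informal note: judging by the names, HMVMX_FLUSH_TAGGED_TLB_EPT_VPID is presumably
   used when both EPT and VPID are usable, _EPT or _VPID when only one of the two is,
   and _NONE when neither is available; the selection logic itself is outside this
   excerpt. */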
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
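/* Usage note: code that needs the complete guest-CPU context asserts this mask before
   relying on it, e.g. vmxHCSwitchToGstOrNstGstVmcs() below does
   HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL). */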
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC in the name is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields. */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
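/* Dispatch sketch (an assumption about the caller, which lives further down in this
   file and is not part of this excerpt): the VM-exit reason is used to index the
   table above, roughly:
       Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
       return g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 */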
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
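/* The table above maps the VM-instruction error (VMX_VMCS32_RO_VM_INSTR_ERROR) to a
   human-readable string. Callers are assumed to range-check against
   HMVMX_INSTR_ERROR_MAX before indexing, e.g.:
       if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
           Log(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 */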
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
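/* Note: the mask returned here presumably ends up in the VMX_VMCS_CTRL_CR0_MASK field
   (see g_aVmcsFields above) when the guest CR0 is exported; that export code is not
   part of this excerpt. */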
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because the behavior
769 * depends on CR3 which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
839
840
841/**
842 * Remove one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequence without any conditionals in between
1234 * (in non-strict builds).
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
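/* Illustrative instantiation (a sketch of how the VM-exit handlers later in this file
   are expected to use this template):
       vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                            | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 */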
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
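
/*
 * A minimal sketch of the cache-vs-VMCS consistency check performed above, reduced to a
 * single generic helper. The callback type and the sketch* names are made up for
 * illustration; the real code uses the VMX_VMCS_READ_32/64 macros directly and records a
 * VMX_VCI_XXX value in the HMCPU error field on mismatch.
 */
#if 0 /* Illustrative sketch, not compiled. */
typedef int (*PFNSKETCHREADFIELD32)(void *pvUser, uint32_t uVmcsField, uint32_t *pu32Val);

/** Returns true if the cached 32-bit control value matches the live VMCS value. */
static bool sketchIsCtlCacheInSync(PFNSKETCHREADFIELD32 pfnRead, void *pvUser,
                                   uint32_t uVmcsField, uint32_t u32Cached)
{
    uint32_t u32Val = 0;
    if (pfnRead(pvUser, uVmcsField, &u32Val) != 0)
        return false;               /* Treat a failed VMREAD as out-of-sync. */
    return u32Val == u32Cached;     /* In sync only when the cache matches the VMCS. */
}
#endif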
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478             * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547             * For nested-guests, we set the "save debug controls" control since the
1548             * corresponding "load debug controls" control is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
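
/*
 * A minimal sketch of how the allowed0/allowed1 ("fixed bit") pair from the VMX capability
 * MSRs constrains a control value, mirroring the fVal/fZap pattern used above. The helper
 * name is made up; since fVal starts out as allowed0, the code above only needs the
 * (fVal & fZap) == fVal half of this check.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Checks a VMX control value against the capability MSR constraints. */
static bool sketchIsVmxCtlValid(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fCtl)
{
    return (fCtl & fAllowed0) == fAllowed0    /* Bits set in allowed0 must be set (mandatory 1). */
        && (fCtl & fAllowed1) == fCtl;        /* Bits clear in allowed1 must be clear (mandatory 0). */
}
#endif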
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
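
/*
 * A minimal sketch of the TPR-threshold computation above: only the task-priority class
 * (bits 7:4 of the TPR and of the pending interrupt vector) matters, and a non-zero
 * threshold is programmed only while the pending interrupt is masked by the current TPR.
 * The helper name is made up for illustration.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Returns the TPR threshold (task-priority class only) to write into the VMCS. */
static uint32_t sketchCalcTprThreshold(uint8_t u8Tpr, bool fPendingIntr, uint8_t u8PendingIntr)
{
    if (!fPendingIntr)
        return 0;                                          /* Nothing pending: no TPR-below-threshold exits. */
    uint8_t const u8PendingPriority = u8PendingIntr >> 4;  /* Priority class of the pending interrupt. */
    uint8_t const u8TprPriority     = u8Tpr         >> 4;  /* Priority class currently masked by the guest. */
    return u8PendingPriority <= u8TprPriority ? u8PendingPriority : 0;
}
/* E.g. TPR=0x50 and pending vector 0x41: class 4 <= 5, so the threshold is 4 and lowering the
   TPR below 0x40 causes a TPR-below-threshold VM-exit, letting us deliver the interrupt. */
#endif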
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1728 }
1729 }
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet.*/
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
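
/*
 * A minimal sketch of the interruptibility-state derivation above, using simplified inputs.
 * The helper name and boolean parameters are made up; the real code queries the CPUM
 * interrupt/NMI shadow state instead.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Derives the VMCS guest interruptibility-state from simplified shadow flags. */
static uint32_t sketchCalcIntrState(bool fInIntrShadow, bool fShadowAfterSs, bool fIf, bool fNmisBlocked)
{
    uint32_t fIntrState = 0;
    if (fInIntrShadow)
        fIntrState = fShadowAfterSs || !fIf                   /* Block-by-STI requires IF=1; otherwise */
                   ? VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS     /* downgrade to block-by-MOV SS, as above. */
                   : VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    if (fNmisBlocked)
        fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    return fIntrState;
}
#endif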
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1812 to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
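
/*
 * A minimal sketch of the real-on-v86 RFLAGS adjustment above: the original flags are
 * stashed for the VM-exit path, the VM bit is forced on and IOPL forced to 0 so that the
 * relevant instructions fault and can be intercepted. The helper name is made up.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Adjusts guest eflags for running real-mode code under virtual-8086 mode. */
static uint32_t sketchMangleRealOnV86Eflags(uint32_t fEFlagsGuest, uint32_t *pfEFlagsSaved)
{
    *pfEFlagsSaved = fEFlagsGuest;           /* Save the original real-mode eflags for the VM-exit path. */
    uint32_t fEFlags = fEFlagsGuest;
    fEFlags |= X86_EFL_VM;                   /* Run the code under virtual-8086 mode. */
    fEFlags &= ~(uint32_t)X86_EFL_IOPL;      /* IOPL=0 so certain instructions fault and we can emulate them. */
    return fEFlags;
}
#endif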
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * calling the import VMCS code which is currently performing the guest MSR reads
1868 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1869 * and the rest of the VMX leave session machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
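
/*
 * A minimal sketch of the table-driven field copying used by the two functions above: walk a
 * flat array of VMCS field encodings and move each field between the software (nested-guest)
 * VMCS and the hardware shadow VMCS through a pair of accessors. The callback types and the
 * sketch* names are made up; the real code uses IEMReadVmxVmcsField/IEMWriteVmxVmcsField and
 * the VMX_VMCS_READ_64/WRITE_64 macros.
 */
#if 0 /* Illustrative sketch, not compiled. */
typedef uint64_t (*PFNSKETCHREADFIELD)(void *pvSrc, uint32_t uVmcsField);
typedef void     (*PFNSKETCHWRITEFIELD)(void *pvDst, uint32_t uVmcsField, uint64_t u64Val);

/** Copies an array of VMCS fields from one VMCS representation to another. */
static void sketchCopyVmcsFields(uint32_t const *pauFields, uint32_t cFields,
                                 void *pvSrc, PFNSKETCHREADFIELD pfnRead,
                                 void *pvDst, PFNSKETCHWRITEFIELD pfnWrite)
{
    for (uint32_t i = 0; i < cFields; i++)
    {
        uint32_t const uVmcsField = pauFields[i];
        pfnWrite(pvDst, uVmcsField, pfnRead(pvSrc, uVmcsField));
    }
}
#endif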
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded, hence we don't need to worry about
2020 * sharing FPU-related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2084 * using CR0.TS.
2085 *
2086             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2087             * set on the first CPUs to support VT-x, while nothing is mentioned with regards to UX in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging, define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
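
/*
 * A minimal sketch of the CR0 massaging above: apply the fixed-0/fixed-1 masks from the VMX
 * capability MSRs, relax PE/PG when unrestricted guest execution is available, force NE and
 * keep caching enabled. The helper name is made up and the nested-paging (WP) and exception
 * bitmap handling is deliberately left out.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Computes the CR0 value handed to VT-x from the guest CR0 and the fixed-bit MSRs. */
static uint64_t sketchMakeVmxGuestCr0(uint64_t u64GuestCr0, uint64_t fCr0Fixed0, uint64_t fCr0Fixed1,
                                      bool fUnrestrictedGuest)
{
    uint64_t fSetCr0 = fCr0Fixed0;
    if (fUnrestrictedGuest)
        fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);     /* PE/PG need not be 1 with unrestricted guest. */

    u64GuestCr0 |= X86_CR0_NE;                               /* Required by VT-x, see the comments above. */
    u64GuestCr0 |= fSetCr0;                                  /* Apply the mandatory-1 bits. */
    u64GuestCr0 &= fCr0Fixed1;                               /* Clear the mandatory-0 bits. */
    u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);     /* Always keep caching enabled. */
    return u64GuestCr0;
}
#endif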
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2249 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
2272
2273 /*
2274                 * With nested paging, the guest's view of its CR3 remains unblemished while the guest
2275                 * is using paging, or while unrestricted guest execution handles running the guest
2276                 * when it's not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4.*/
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386             * The shadow paging mode and the guest paging mode differ: the shadow follows the host paging
2387             * mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
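
/*
 * A minimal sketch of the EPTP construction above, written with plain shifts instead of the
 * RT_BF_MAKE macros. The bit layout (memory type in bits 2:0, page-walk length minus one in
 * bits 5:3, accessed/dirty enable in bit 6) follows the Intel SDM; the constants and the
 * helper name here are for illustration only.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Builds an EPT pointer for 4-level, write-back cached EPT paging structures. */
static uint64_t sketchMakeEptp(uint64_t HCPhysEptPml4)
{
    Assert(!(HCPhysEptPml4 & 0xfff));        /* Must be 4K aligned; bits 11:0 carry the EPTP attributes. */
    uint64_t uEptp = HCPhysEptPml4;
    uEptp |= UINT64_C(6) << 0;               /* Bits 2:0: memory type, 6 = write-back (WB). */
    uEptp |= UINT64_C(3) << 3;               /* Bits 5:3: page-walk length - 1, i.e. 4 levels. */
    /* Bit 6 (accessed/dirty enable) is left clear, matching the validation above. */
    return uEptp;
}
#endif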
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465     * The reason we check for an attribute value of 0 in this function, and not just the unusable
2466     * bit, is that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467     * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581         /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
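
/*
 * A minimal sketch of the real/v86-mode segment invariants asserted above: the base must
 * equal the selector shifted left by 4, the limit must be 0xffff and the access rights 0xf3
 * (present, DPL=3, accessed read/write data). The helper name is made up for illustration.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Checks a single segment register against the real/v86-mode invariants used above. */
static bool sketchIsRealModeSegOk(uint16_t uSel, uint64_t u64Base, uint32_t u32Limit, uint32_t u32Attr)
{
    return u64Base  == (uint64_t)uSel << 4   /* Real-mode base is always selector * 16. */
        && u32Limit == 0xffff                /* 64 KB limit. */
        && u32Attr  == 0xf3;                 /* The access rights VT-x expects for real-on-v86. */
}
#endif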
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650 /*
2651         * The way to differentiate whether this is really a null selector or just a selector
2652         * loaded with 0 in real-mode is by using the segment attributes. A selector loaded in
2653         * real-mode with the value 0 is valid and usable in protected-mode and we should -not-
2654         * mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
2655         * loaded in protected-mode have an attribute value of 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
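
/*
 * A minimal sketch of the access-rights selection in vmxHCExportGuestSegReg(): a zero
 * attribute value means the selector really is unusable (null in protected mode), while the
 * real-on-v86 hack forces 0xf3. The helper name is made up for illustration.
 */
#if 0 /* Illustrative sketch, not compiled. */
/** Picks the access-rights value to write into the VMCS for a segment register. */
static uint32_t sketchCalcSegAccessRights(uint32_t u32Attr, bool fRealOnV86Active)
{
    if (fRealOnV86Active)
        return 0xf3;                         /* The real-using-v86 hack overrides the access rights. */
    if (!u32Attr)
        return X86DESCATTR_UNUSABLE;         /* Null selector loaded in protected mode: mark unusable. */
    return u32Attr;                          /* Otherwise use the guest attributes as-is. */
}
#endif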
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
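
/*
 * Usage sketch (illustrative, not a definitive call site): a caller holding the
 * VM-exit interruption-information field can translate it for IEM as below.
 * The VMX_EXIT_INT_INFO_VECTOR/TYPE accessors and pVmxTransient->uExitIntInfo
 * are assumed to be the usual ones from the VMX headers/transient state; the
 * local names are made up for the example.
 *
 *     uint32_t const uExitIntInfo  = pVmxTransient->uExitIntInfo;
 *     uint8_t  const uVector       = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
 *     uint32_t const uType         = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
 *     uint32_t const fIemXcptFlags = vmxHCGetIemXcptFlags(uVector, uType);
 *     // fIemXcptFlags now carries IEM_XCPT_FLAGS_T_CPU_XCPT, _T_EXT_INT, etc.
 */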
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
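
/*
 * Usage sketch (illustrative): queueing a page-fault (#PF) with an error code
 * and fault address could look roughly like this. uErrCode and GCPtrFault are
 * hypothetical locals; VMX_ENTRY_INT_INFO_TYPE_HW_XCPT is assumed to be the
 * hardware-exception type constant from the VMX headers.
 *
 *     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *     vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0, uErrCode, GCPtrFault);   // cbInstr = 0 for hardware exceptions
 */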
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
3037 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3043}
3044
3045
3046/**
3047 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3048 *
3049 * @param pVCpu The cross context virtual CPU structure.
3050 */
3051DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3052{
3053 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3057 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3058 Log4Func(("NMI pending injection\n"));
3059}
3060
3061
3062/**
3063 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3064 *
3065 * @param pVCpu The cross context virtual CPU structure.
3066 */
3067DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3068{
3069 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3072 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3073 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3074}
3075
3076
3077/**
3078 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3079 *
3080 * @param pVCpu The cross context virtual CPU structure.
3081 */
3082DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3083{
3084 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3087 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3088 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3089}
3090
3091
3092/**
3093 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3094 *
3095 * @param pVCpu The cross context virtual CPU structure.
3096 */
3097DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3098{
3099 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3103 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3104}
3105
3106
3107#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3108/**
3109 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3110 *
3111 * @param pVCpu The cross context virtual CPU structure.
3112 * @param u32ErrCode The error code for the general-protection exception.
3113 */
3114DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3115{
3116 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3119 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3120 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3121}
3122
3123
3124/**
3125 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3126 *
3127 * @param pVCpu The cross context virtual CPU structure.
3128 * @param u32ErrCode The error code for the stack exception.
3129 */
3130DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3131{
3132 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3134 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3135 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3136 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3137}
3138#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
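
/*
 * Usage sketch (illustrative): a nested-VMX instruction path that detects an
 * invalid operand might queue a #GP(0) via the helper above and let the normal
 * injection machinery deliver it; fInvalidOperand and the surrounding control
 * flow are hypothetical.
 *
 *     if (RT_UNLIKELY(fInvalidOperand))
 *     {
 *         vmxHCSetPendingXcptGP(pVCpu, 0);   // error code 0
 *         return VINF_SUCCESS;               // injected on the next VM-entry
 *     }
 */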
3139
3140
3141/**
3142 * Fixes up attributes for the specified segment register.
3143 *
3144 * @param pVCpu The cross context virtual CPU structure.
3145 * @param pSelReg The segment register that needs fixing.
3146 * @param pszRegName The register name (for logging and assertions).
3147 */
3148static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3149{
3150 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3151
3152 /*
3153 * If VT-x marks the segment as unusable, most other bits remain undefined:
3154 * - For CS the L, D and G bits have meaning.
3155 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3156 * - For the remaining data segments no bits are defined.
3157 *
3158 * The present bit and the unusable bit have been observed to be set at the
3159 * same time (the selector was supposed to be invalid as we started executing
3160 * a V8086 interrupt in ring-0).
3161 *
3162 * What should be important for the rest of the VBox code is that the P bit is
3163 * cleared. Some of the other VBox code recognizes the unusable bit, but
3164 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3165 * safe side here, we'll strip off P and other bits we don't care about. If
3166 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3167 *
3168 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3169 */
3170#ifdef VBOX_STRICT
3171 uint32_t const uAttr = pSelReg->Attr.u;
3172#endif
3173
3174 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3175 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3176 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3177
3178#ifdef VBOX_STRICT
3179# ifndef IN_NEM_DARWIN
3180 VMMRZCallRing3Disable(pVCpu);
3181# endif
3182 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3183# ifdef DEBUG_bird
3184 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3185 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3186 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3187# endif
3188# ifndef IN_NEM_DARWIN
3189 VMMRZCallRing3Enable(pVCpu);
3190# endif
3191 NOREF(uAttr);
3192#endif
3193 RT_NOREF2(pVCpu, pszRegName);
3194}
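
/*
 * Worked example (hypothetical values, assuming the usual X86DESCATTR_XXX bit
 * positions): an unusable data selector with Attr.u = 0x1cf93 (unusable=1, G=1,
 * D=1, limit-high=0xf, P=1, S=1, type=3) is reduced by the mask above to
 * 0x1c013, i.e. the present bit (0x80) and the limit-high nibble (0xf00) are
 * stripped while G, D, S, DPL and the type survive.
 */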
3195
3196
3197/**
3198 * Imports a guest segment register from the current VMCS into the guest-CPU
3199 * context.
3200 *
3201 * @param pVCpu The cross context virtual CPU structure.
3202 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3203 *
3204 * @remarks Called with interrupts and/or preemption disabled.
3205 */
3206template<uint32_t const a_iSegReg>
3207DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3208{
3209 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3210 /* Check that the macros we depend upon here and in the exporting parent function work: */
3211#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3212 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3216 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3217 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3218 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3219 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3220 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3221 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3222
3223 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3224
3225 uint16_t u16Sel;
3226 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3227 pSelReg->Sel = u16Sel;
3228 pSelReg->ValidSel = u16Sel;
3229
3230 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3231 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3232
3233 uint32_t u32Attr;
3234 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3235 pSelReg->Attr.u = u32Attr;
3236 if (u32Attr & X86DESCATTR_UNUSABLE)
3237 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3238
3239 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3240}
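
/*
 * Usage sketch: the template is instantiated with a compile-time segment index,
 * e.g. when only CS and SS need to be refreshed from the VMCS:
 *
 *     vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
 *     vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
 *
 * Passing the index as a template parameter lets the VMCS field lookups fold to
 * compile-time constants, which is what the AssertCompile checks above verify.
 */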
3241
3242
3243/**
3244 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3245 *
3246 * @param pVCpu The cross context virtual CPU structure.
3247 *
3248 * @remarks Called with interrupts and/or preemption disabled.
3249 */
3250DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3251{
3252 uint16_t u16Sel;
3253 uint64_t u64Base;
3254 uint32_t u32Limit, u32Attr;
3255 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3256 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3257 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3258 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3259
3260 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3261 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3262 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3263 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3264 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3265 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3266 if (u32Attr & X86DESCATTR_UNUSABLE)
3267 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3268}
3269
3270
3271/**
3272 * Imports the guest TR from the VMCS into the guest-CPU context.
3273 *
3274 * @param pVCpu The cross context virtual CPU structure.
3275 *
3276 * @remarks Called with interrupts and/or preemption disabled.
3277 */
3278DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3279{
3280 uint16_t u16Sel;
3281 uint64_t u64Base;
3282 uint32_t u32Limit, u32Attr;
3283 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3284 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3285 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3286 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3287
3288 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3289 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3290 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3291 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3292 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3293 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3294 /* TR is the only selector that can never be unusable. */
3295 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3296}
3297
3298
3299/**
3300 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3301 *
3302 * @returns The RIP value.
3303 * @param pVCpu The cross context virtual CPU structure.
3304 *
3305 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3306 * @remarks Do -not- call this function directly!
3307 */
3308DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3309{
3310 uint64_t u64Val;
3311 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3312 AssertRC(rc);
3313
3314 pVCpu->cpum.GstCtx.rip = u64Val;
3315
3316 return u64Val;
3317}
3318
3319
3320/**
3321 * Imports the guest RIP from the VMCS into the guest-CPU context.
3322 *
3323 * @param pVCpu The cross context virtual CPU structure.
3324 *
3325 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3326 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3327 * instead!!!
3328 */
3329DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3330{
3331 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3332 {
3333 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3334 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3335 }
3336}
3337
3338
3339/**
3340 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3341 *
3342 * @param pVCpu The cross context virtual CPU structure.
3343 * @param pVmcsInfo The VMCS info. object.
3344 *
3345 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3346 * @remarks Do -not- call this function directly!
3347 */
3348DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3349{
3350 uint64_t fRFlags;
3351 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3352 AssertRC(rc);
3353
3354 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3355 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3356
3357 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3358#ifndef IN_NEM_DARWIN
3359 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3360 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3361 { /* most likely */ }
3362 else
3363 {
3364 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3365 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3366 }
3367#else
3368 RT_NOREF(pVmcsInfo);
3369#endif
3370}
3371
3372
3373/**
3374 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3375 *
3376 * @param pVCpu The cross context virtual CPU structure.
3377 * @param pVmcsInfo The VMCS info. object.
3378 *
3379 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3380 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3381 * instead!!!
3382 */
3383DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3384{
3385 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3386 {
3387 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3388 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3389 }
3390}
3391
3392
3393#ifndef IN_NEM_DARWIN
3394/**
3395 * Imports the guest TSC_AUX and certain other MSRs from the VMCS into the guest-CPU
3396 * context.
3397 *
3398 * The other MSRs are in the VM-exit MSR-store.
3399 *
3400 * @returns VBox status code.
3401 * @param pVCpu The cross context virtual CPU structure.
3402 * @param pVmcsInfo The VMCS info. object.
3403 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3404 * unexpected errors). Ignored in NEM/darwin context.
3405 */
3406DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3407{
3408 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3409 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3410 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3411 Assert(pMsrs);
3412 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3413 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3414 for (uint32_t i = 0; i < cMsrs; i++)
3415 {
3416 uint32_t const idMsr = pMsrs[i].u32Msr;
3417 switch (idMsr)
3418 {
3419 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3420 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3421 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3422 default:
3423 {
3424 uint32_t idxLbrMsr;
3425 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3426 if (VM_IS_VMX_LBR(pVM))
3427 {
3428 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3429 {
3430 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3431 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3432 break;
3433 }
3434 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3435 {
3436 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3437 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3438 break;
3439 }
3440 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3441 {
3442 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3443 break;
3444 }
3445 /* Fallthru (no break) */
3446 }
3447 pVCpu->cpum.GstCtx.fExtrn = 0;
3448 VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3449 ASMSetFlags(fEFlags);
3450 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3451 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3452 }
3453 }
3454 }
3455 return VINF_SUCCESS;
3456}
3457#endif /* !IN_NEM_DARWIN */
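
/*
 * Layout sketch (illustrative): the VM-exit MSR-store area walked above is an
 * array of VMXAUTOMSR entries, each pairing an MSR index (u32Msr) with the
 * 64-bit value the CPU saved on VM-exit (u64Value). A minimal lookup of a
 * single MSR, using only names already present above, might read:
 *
 *     for (uint32_t i = 0; i < cMsrs; i++)
 *         if (pMsrs[i].u32Msr == MSR_K8_TSC_AUX)
 *         {
 *             CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value);
 *             break;
 *         }
 */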
3458
3459
3460/**
3461 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3462 *
3463 * @param pVCpu The cross context virtual CPU structure.
3464 * @param pVmcsInfo The VMCS info. object.
3465 */
3466DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3467{
3468 uint64_t u64Cr0;
3469 uint64_t u64Shadow;
3470 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3471 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3472#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3473 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3474 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3475#else
3476 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3477 {
3478 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3479 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3480 }
3481 else
3482 {
3483 /*
3484 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3485 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3486 * re-construct CR0. See @bugref{9180#c95} for details.
3487 */
3488 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3489 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3490 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3491 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3492 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3493 Assert(u64Cr0 & X86_CR0_NE);
3494 }
3495#endif
3496
3497#ifndef IN_NEM_DARWIN
3498 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3499#endif
3500 CPUMSetGuestCR0(pVCpu, u64Cr0);
3501#ifndef IN_NEM_DARWIN
3502 VMMRZCallRing3Enable(pVCpu);
3503#endif
3504}
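
/*
 * Worked example (made-up numbers): the reconstruction above follows the VT-x
 * guest/host mask rule -- host-owned bits (mask bit set) come from the read
 * shadow, guest-owned bits come from the raw VMCS value. E.g. with
 * u64Cr0Mask = 0x20 (host owns CR0.NE only), raw GUEST_CR0 = 0x80000033 and a
 * read shadow of 0x13:
 *
 *     (0x80000033 & ~0x20) | (0x13 & 0x20) = 0x80000013
 *
 * so the guest sees NE as it last wrote it, while the remaining bits reflect
 * the hardware value.
 */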
3505
3506
3507/**
3508 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3509 *
3510 * @param pVCpu The cross context virtual CPU structure.
3511 */
3512DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3513{
3514 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3515 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3516
3517 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3518 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3519 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3520 && CPUMIsGuestPagingEnabledEx(pCtx)))
3521 {
3522 uint64_t u64Cr3;
3523 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3524 if (pCtx->cr3 != u64Cr3)
3525 {
3526 pCtx->cr3 = u64Cr3;
3527 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3528 }
3529
3530 /*
3531 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3532 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3533 */
3534 if (CPUMIsGuestInPAEModeEx(pCtx))
3535 {
3536 X86PDPE aPaePdpes[4];
3537 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3539 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3540 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3541 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3542 {
3543 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3544 /* PGM now updates PAE PDPTEs while updating CR3. */
3545 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3546 }
3547 }
3548 }
3549}
3550
3551
3552/**
3553 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3554 *
3555 * @param pVCpu The cross context virtual CPU structure.
3556 * @param pVmcsInfo The VMCS info. object.
3557 */
3558DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3559{
3560 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3561 uint64_t u64Cr4;
3562 uint64_t u64Shadow;
3563 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3564 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3565#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3566 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3567 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3568#else
3569 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3570 {
3571 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3572 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3573 }
3574 else
3575 {
3576 /*
3577 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3578 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3579 * re-construct CR4. See @bugref{9180#c95} for details.
3580 */
3581 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3582 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3583 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3584 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3585 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3586 Assert(u64Cr4 & X86_CR4_VMXE);
3587 }
3588#endif
3589 pCtx->cr4 = u64Cr4;
3590}
3591
3592
3593/**
3594 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3595 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3596 */
3597DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3598{
3599 /*
3600 * We must import RIP here to set our EM interrupt-inhibited state.
3601 * We also import RFLAGS as our code that evaluates pending interrupts
3602 * before VM-entry requires it.
3603 */
3604 vmxHCImportGuestRip(pVCpu);
3605 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3606
3607 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3608 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3609 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3610 pVCpu->cpum.GstCtx.rip);
3611 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3612}
3613
3614
3615/**
3616 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3617 * context.
3618 *
3619 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3620 *
3621 * @param pVCpu The cross context virtual CPU structure.
3622 * @param pVmcsInfo The VMCS info. object.
3623 *
3624 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3625 * do not log!
3626 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3627 * instead!!!
3628 */
3629DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3630{
3631 uint32_t u32Val;
3632 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3633 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3634 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3635 if (!u32Val)
3636 {
3637 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3638 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3639 }
3640 else
3641 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3642}
3643
3644
3645/**
3646 * Worker for VMXR0ImportStateOnDemand.
3647 *
3648 * @returns VBox status code.
3649 * @param pVCpu The cross context virtual CPU structure.
3650 * @param pVmcsInfo The VMCS info. object.
3651 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3652 */
3653static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3654{
3655 int rc = VINF_SUCCESS;
3656 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3657 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3658 uint32_t u32Val;
3659
3660 /*
3661 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3662 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3663 * neither are other host platforms.
3664 *
3665 * Committing this temporarily as it prevents BSOD.
3666 *
3667 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3668 */
3669#ifdef RT_OS_WINDOWS
3670 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3671 return VERR_HM_IPE_1;
3672#endif
3673
3674 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3675
3676#ifndef IN_NEM_DARWIN
3677 /*
3678 * We disable interrupts to make the updating of the state and in particular
3679 * the fExtrn modification atomic wrt preemption hooks.
3680 */
3681 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3682#endif
3683
3684 fWhat &= pCtx->fExtrn;
3685 if (fWhat)
3686 {
3687 do
3688 {
3689 if (fWhat & CPUMCTX_EXTRN_RIP)
3690 vmxHCImportGuestRip(pVCpu);
3691
3692 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3693 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3694
3695 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3696 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3697 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3698
3699 if (fWhat & CPUMCTX_EXTRN_RSP)
3700 {
3701 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3702 AssertRC(rc);
3703 }
3704
3705 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3706 {
3707 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3708#ifndef IN_NEM_DARWIN
3709 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3710#else
3711 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3712#endif
3713 if (fWhat & CPUMCTX_EXTRN_CS)
3714 {
3715 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3716 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3717 if (fRealOnV86Active)
3718 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3719 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3720 }
3721 if (fWhat & CPUMCTX_EXTRN_SS)
3722 {
3723 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3724 if (fRealOnV86Active)
3725 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3726 }
3727 if (fWhat & CPUMCTX_EXTRN_DS)
3728 {
3729 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3730 if (fRealOnV86Active)
3731 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3732 }
3733 if (fWhat & CPUMCTX_EXTRN_ES)
3734 {
3735 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3736 if (fRealOnV86Active)
3737 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3738 }
3739 if (fWhat & CPUMCTX_EXTRN_FS)
3740 {
3741 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3742 if (fRealOnV86Active)
3743 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3744 }
3745 if (fWhat & CPUMCTX_EXTRN_GS)
3746 {
3747 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3748 if (fRealOnV86Active)
3749 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3750 }
3751 }
3752
3753 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3754 {
3755 if (fWhat & CPUMCTX_EXTRN_LDTR)
3756 vmxHCImportGuestLdtr(pVCpu);
3757
3758 if (fWhat & CPUMCTX_EXTRN_GDTR)
3759 {
3760 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3761 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3762 pCtx->gdtr.cbGdt = u32Val;
3763 }
3764
3765 /* Guest IDTR. */
3766 if (fWhat & CPUMCTX_EXTRN_IDTR)
3767 {
3768 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3769 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3770 pCtx->idtr.cbIdt = u32Val;
3771 }
3772
3773 /* Guest TR. */
3774 if (fWhat & CPUMCTX_EXTRN_TR)
3775 {
3776#ifndef IN_NEM_DARWIN
3777 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3778 so we don't need to import that one. */
3779 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3780#endif
3781 vmxHCImportGuestTr(pVCpu);
3782 }
3783 }
3784
3785 if (fWhat & CPUMCTX_EXTRN_DR7)
3786 {
3787#ifndef IN_NEM_DARWIN
3788 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3789#endif
3790 {
3791 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3792 AssertRC(rc);
3793 }
3794 }
3795
3796 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3797 {
3798 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3799 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3800 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3801 pCtx->SysEnter.cs = u32Val;
3802 }
3803
3804#ifndef IN_NEM_DARWIN
3805 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3806 {
3807 if ( pVM->hmr0.s.fAllow64BitGuests
3808 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3809 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3810 }
3811
3812 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3813 {
3814 if ( pVM->hmr0.s.fAllow64BitGuests
3815 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3816 {
3817 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3818 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3819 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3820 }
3821 }
3822
3823 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3824 {
3825 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3826 AssertRCReturn(rc, rc);
3827 }
3828#else
3829 NOREF(pVM);
3830#endif
3831
3832 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3833 {
3834 if (fWhat & CPUMCTX_EXTRN_CR0)
3835 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3836
3837 if (fWhat & CPUMCTX_EXTRN_CR4)
3838 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3839
3840 if (fWhat & CPUMCTX_EXTRN_CR3)
3841 vmxHCImportGuestCr3(pVCpu);
3842 }
3843
3844#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3845 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3846 {
3847 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3848 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3849 {
3850 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3851 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3852 if (RT_SUCCESS(rc))
3853 { /* likely */ }
3854 else
3855 break;
3856 }
3857 }
3858#endif
3859 } while (0);
3860
3861 if (RT_SUCCESS(rc))
3862 {
3863 /* Update fExtrn. */
3864 pCtx->fExtrn &= ~fWhat;
3865
3866 /* If everything has been imported, clear the HM keeper bit. */
3867 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3868 {
3869#ifndef IN_NEM_DARWIN
3870 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3871#else
3872 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3873#endif
3874 Assert(!pCtx->fExtrn);
3875 }
3876 }
3877 }
3878#ifndef IN_NEM_DARWIN
3879 else
3880 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3881
3882 /*
3883 * Restore interrupts.
3884 */
3885 ASMSetFlags(fEFlags);
3886#endif
3887
3888 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3889
3890 if (RT_SUCCESS(rc))
3891 { /* likely */ }
3892 else
3893 return rc;
3894
3895 /*
3896 * Honor any pending CR3 updates.
3897 *
3898 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3899 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3900 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3901 *
3902 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3903 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3904 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3905 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3906 *
3907 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3908 *
3909 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3910 */
3911 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3912#ifndef IN_NEM_DARWIN
3913 && VMMRZCallRing3IsEnabled(pVCpu)
3914#endif
3915 )
3916 {
3917 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3918 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3919 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3920 }
3921
3922 return VINF_SUCCESS;
3923}
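
/*
 * Usage sketch (illustrative): an exit handler that only needs enough state to
 * decode the faulting instruction could call the fallback importer with a
 * narrow mask, e.g.:
 *
 *     int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo,
 *                                      CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SREG_MASK);
 *     AssertRCReturn(rc, rc);
 *
 * In practice the templated vmxHCImportGuestState() further down is preferred,
 * as it lets the compiler discard the unused import paths.
 */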
3924
3925
3926/**
3927 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3928 *
3929 * @returns VBox status code.
3930 * @param pVCpu The cross context virtual CPU structure.
3931 * @param pVmcsInfo The VMCS info. object.
3932 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3933 * in NEM/darwin context.
3934 * @tparam a_fWhat What to import, zero or more bits from
3935 * HMVMX_CPUMCTX_EXTRN_ALL.
3936 */
3937template<uint64_t const a_fWhat>
3938static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3939{
3940 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3941 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3942 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3943 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3944
3945 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3946
3947 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3948
3949 /* RIP and RFLAGS may have been imported already by the post exit code
3950 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3951 this part of the code is skipped. */
3952 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3953 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3954 {
3955 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3956 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3957
3958 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3959 {
3960 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3961 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3962 else
3963 vmxHCImportGuestCoreRip(pVCpu);
3964 }
3965 }
3966
3967 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3968 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3969 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3970
3971 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3972 {
3973 if (a_fWhat & CPUMCTX_EXTRN_CS)
3974 {
3975 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3976 /** @todo try get rid of this carp, it smells and is probably never ever
3977 * used: */
3978 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3979 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3980 {
3981 vmxHCImportGuestCoreRip(pVCpu);
3982 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3983 }
3984 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3985 }
3986 if (a_fWhat & CPUMCTX_EXTRN_SS)
3987 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3988 if (a_fWhat & CPUMCTX_EXTRN_DS)
3989 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3990 if (a_fWhat & CPUMCTX_EXTRN_ES)
3991 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3992 if (a_fWhat & CPUMCTX_EXTRN_FS)
3993 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3994 if (a_fWhat & CPUMCTX_EXTRN_GS)
3995 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3996
3997 /* Guest TR.
3998 Real-mode emulation using virtual-8086 mode has the fake TSS
3999 (pRealModeTSS) in TR, so we don't need to import that one. */
4000#ifndef IN_NEM_DARWIN
4001 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4002 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4003 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4004#else
4005 if (a_fWhat & CPUMCTX_EXTRN_TR)
4006#endif
4007 vmxHCImportGuestTr(pVCpu);
4008
4009#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4010 if (fRealOnV86Active)
4011 {
4012 if (a_fWhat & CPUMCTX_EXTRN_CS)
4013 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4014 if (a_fWhat & CPUMCTX_EXTRN_SS)
4015 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4016 if (a_fWhat & CPUMCTX_EXTRN_DS)
4017 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4018 if (a_fWhat & CPUMCTX_EXTRN_ES)
4019 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4020 if (a_fWhat & CPUMCTX_EXTRN_FS)
4021 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4022 if (a_fWhat & CPUMCTX_EXTRN_GS)
4023 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4024 }
4025#endif
4026 }
4027
4028 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4029 {
4030 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4031 AssertRC(rc);
4032 }
4033
4034 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4035 vmxHCImportGuestLdtr(pVCpu);
4036
4037 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4038 {
4039 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4040 uint32_t u32Val;
4041 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4042 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4043 }
4044
4045 /* Guest IDTR. */
4046 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4047 {
4048 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4049 uint32_t u32Val;
4050 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4051 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
4052 }
4053
4054 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4055 {
4056#ifndef IN_NEM_DARWIN
4057 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4058#endif
4059 {
4060 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4061 AssertRC(rc);
4062 }
4063 }
4064
4065 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4066 {
4067 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4068 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4069 uint32_t u32Val;
4070 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4071 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4072 }
4073
4074#ifndef IN_NEM_DARWIN
4075 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4076 {
4077 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4078 && pVM->hmr0.s.fAllow64BitGuests)
4079 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4080 }
4081
4082 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4083 {
4084 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4085 && pVM->hmr0.s.fAllow64BitGuests)
4086 {
4087 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4088 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4089 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4090 }
4091 }
4092
4093 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4094 {
4095 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4096 AssertRCReturn(rc1, rc1);
4097 }
4098#else
4099 NOREF(pVM);
4100#endif
4101
4102 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4103 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4104
4105 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4106 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4107
4108 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4109 vmxHCImportGuestCr3(pVCpu);
4110
4111#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4112 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4113 {
4114 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4115 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4116 {
4117 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4118 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4119 AssertRCReturn(rc, rc);
4120 }
4121 }
4122#endif
4123
4124 /* Update fExtrn. */
4125 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4126
4127 /* If everything has been imported, clear the HM keeper bit. */
4128 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4129 {
4130#ifndef IN_NEM_DARWIN
4131 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4132#else
4133 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4134#endif
4135 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4136 }
4137
4138 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4139
4140 /*
4141 * Honor any pending CR3 updates.
4142 *
4143 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4144 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4145 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4146 *
4147 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4148 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4149 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4150 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4151 *
4152 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4153 *
4154 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4155 */
4156#ifndef IN_NEM_DARWIN
4157 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4158 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4159 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4160 return VINF_SUCCESS;
4161 ASMSetFlags(fEFlags);
4162#else
4163 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4164 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4165 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4166 return VINF_SUCCESS;
4167 RT_NOREF_PV(fEFlags);
4168#endif
4169
4170 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4171 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4172 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4173 return VINF_SUCCESS;
4174}
4175
4176
4177/**
4178 * Internal state fetcher.
4179 *
4180 * @returns VBox status code.
4181 * @param pVCpu The cross context virtual CPU structure.
4182 * @param pVmcsInfo The VMCS info. object.
4183 * @param pszCaller For logging.
4184 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4185 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4186 * already. This is ORed together with @a a_fWhat when
4187 * calculating what needs fetching (just for safety).
4188 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4189 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4190 * already. This is ORed together with @a a_fWhat when
4191 * calculating what needs fetching (just for safety).
4192 */
4193template<uint64_t const a_fWhat,
4194 uint64_t const a_fDoneLocal = 0,
4195 uint64_t const a_fDonePostExit = 0
4196#ifndef IN_NEM_DARWIN
4197 | CPUMCTX_EXTRN_INHIBIT_INT
4198 | CPUMCTX_EXTRN_INHIBIT_NMI
4199# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4200 | HMVMX_CPUMCTX_EXTRN_ALL
4201# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4202 | CPUMCTX_EXTRN_RFLAGS
4203# endif
4204#else /* IN_NEM_DARWIN */
4205 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4206#endif /* IN_NEM_DARWIN */
4207>
4208DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4209{
4210 RT_NOREF_PV(pszCaller);
4211 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4212 {
4213#ifndef IN_NEM_DARWIN
4214 /*
4215 * We disable interrupts to make the updating of the state and in particular
4216 * the fExtrn modification atomic wrt preemption hooks.
4217 */
4218 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4219#else
4220 RTCCUINTREG const fEFlags = 0;
4221#endif
4222
4223 /*
4224 * We combine all three parameters and take the (probably) inlined optimized
4225 * code path for the new things specified in a_fWhat.
4226 *
4227 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4228 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4229 * also take the streamlined path when both of these are cleared in fExtrn
4230 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4231 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4232 */
4233 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4234 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4235 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4236 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4237 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4238 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4239 {
4240 int const rc = vmxHCImportGuestStateInner< a_fWhat
4241 & HMVMX_CPUMCTX_EXTRN_ALL
4242 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4243#ifndef IN_NEM_DARWIN
4244 ASMSetFlags(fEFlags);
4245#endif
4246 return rc;
4247 }
4248
4249#ifndef IN_NEM_DARWIN
4250 ASMSetFlags(fEFlags);
4251#endif
4252
4253 /*
4254 * We shouldn't normally get here, but it may happen when executing
4255 * in the debug run-loops. Typically, everything should already have
4256 * been fetched then. Otherwise call the fallback state import function.
4257 */
4258 if (fWhatToDo == 0)
4259 { /* hope the cause was the debug loop or something similar */ }
4260 else
4261 {
4262 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4263 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4264 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4265 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4266 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4267 }
4268 }
4269 return VINF_SUCCESS;
4270}
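
/*
 * Usage sketch (hypothetical call site): callers instantiate the template with
 * exactly the state their exit handler needs, passing the caller name for the
 * fallback-path log statement:
 *
 *     int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CR0>(pVCpu, pVmcsInfo, __FUNCTION__);
 *     AssertRCReturn(rc, rc);
 *
 * The a_fDoneLocal / a_fDonePostExit parameters keep their defaults unless the
 * caller has already imported part of the state itself.
 */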
4271
4272
4273/**
4274 * Check per-VM and per-VCPU force flag actions that require us to go back to
4275 * ring-3 for one reason or another.
4276 *
4277 * @returns Strict VBox status code (i.e. informational status codes too)
4278 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4279 * ring-3.
4280 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4281 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4282 * interrupts)
4283 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4284 * all EMTs to be in ring-3.
4285 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4286 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4287 * to the EM loop.
4288 *
4289 * @param pVCpu The cross context virtual CPU structure.
4290 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4291 * @param fStepping Whether we are single-stepping the guest using the
4292 * hypervisor debugger.
4293 *
4294 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4295 * is no longer in VMX non-root mode.
4296 */
4297static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4298{
4299#ifndef IN_NEM_DARWIN
4300 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4301#endif
4302
4303 /*
4304 * Update pending interrupts into the APIC's IRR.
4305 */
4306 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4307 APICUpdatePendingInterrupts(pVCpu);
4308
4309 /*
4310 * Anything pending? Should be more likely than not if we're doing a good job.
4311 */
4312 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4313 if ( !fStepping
4314 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4315 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4316 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4317 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4318 return VINF_SUCCESS;
4319
4320 /* Pending PGM CR3 sync. */
4321 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4322 {
4323 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4324 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4325 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4326 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4327 if (rcStrict != VINF_SUCCESS)
4328 {
4329 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4330 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4331 return rcStrict;
4332 }
4333 }
4334
4335 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4336 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4337 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4338 {
4339 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4340 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4341 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4342 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4343 return rc;
4344 }
4345
4346 /* Pending VM request packets, such as hardware interrupts. */
4347 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4348 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4349 {
4350 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4351 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4352 return VINF_EM_PENDING_REQUEST;
4353 }
4354
4355 /* Pending PGM pool flushes. */
4356 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4357 {
4358 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4359 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4360 return VINF_PGM_POOL_FLUSH_PENDING;
4361 }
4362
4363 /* Pending DMA requests. */
4364 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4365 {
4366 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4367 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4368 return VINF_EM_RAW_TO_R3;
4369 }
4370
4371#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4372 /*
4373 * Pending nested-guest events.
4374 *
4375 * Please note that the priority of these events is specified and important.
4376 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4377 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4378 *
4379 * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
4380 * handled here. They'll be handled by the hardware while executing the nested-guest
4381 * or by us when we inject events that are not part of the VM-entry of the nested-guest.
4382 */
4383 if (fIsNestedGuest)
4384 {
4385 /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
4386 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4387 {
4388 Log4Func(("Pending nested-guest APIC-write\n"));
4389 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4390 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4391 if ( rcStrict == VINF_SUCCESS
4392 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4393 return rcStrict;
4394 }
4395
4396 /* Pending nested-guest monitor-trap flag (MTF). */
4397 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4398 {
4399 Log4Func(("Pending nested-guest MTF\n"));
4400 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4401 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4402 return rcStrict;
4403 }
4404
4405 /* Pending nested-guest VMX-preemption timer expired. */
4406 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4407 {
4408 Log4Func(("Pending nested-guest preempt timer\n"));
4409 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4410 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4411 return rcStrict;
4412 }
4413 }
4414#else
4415 NOREF(fIsNestedGuest);
4416#endif
4417
4418 return VINF_SUCCESS;
4419}
4420
4421
4422/**
4423 * Converts any TRPM trap into a pending HM event. This is typically used when
4424 * entering from ring-3 (not for longjmp returns).
4425 *
4426 * @param pVCpu The cross context virtual CPU structure.
4427 */
4428static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4429{
4430 Assert(TRPMHasTrap(pVCpu));
4431 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4432
4433 uint8_t uVector;
4434 TRPMEVENT enmTrpmEvent;
4435 uint32_t uErrCode;
4436 RTGCUINTPTR GCPtrFaultAddress;
4437 uint8_t cbInstr;
4438 bool fIcebp;
4439
4440 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4441 AssertRC(rc);
4442
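    /* The IDT-vectoring / VM-entry interruption-information format packs the vector in
       bits 7:0, the event type in bits 10:8, the error-code-valid bit in bit 11 and the
       valid bit in bit 31; see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". */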
4443 uint32_t u32IntInfo;
4444 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4445 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4446
4447 rc = TRPMResetTrap(pVCpu);
4448 AssertRC(rc);
4449 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4450 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4451
4452 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4453}
4454
4455
4456/**
4457 * Converts the pending HM event into a TRPM trap.
4458 *
4459 * @param pVCpu The cross context virtual CPU structure.
4460 */
4461static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4462{
4463 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4464
4465 /* If a trap was already pending, we did something wrong! */
4466 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4467
4468 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4469 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4470 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4471
4472 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4473
4474 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4475 AssertRC(rc);
4476
4477 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4478 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4479
4480 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4481 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4482 else
4483 {
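            /* For software-generated events (INT n, INT3, INTO, INT1/ICEBP) we must also hand
               the instruction length to TRPM so that delivery resumes at the instruction
               following the one that raised the event. */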
4484 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4485 switch (uVectorType)
4486 {
4487 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4488 TRPMSetTrapDueToIcebp(pVCpu);
4489 RT_FALL_THRU();
4490 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4491 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4492 {
4493 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4494 || ( uVector == X86_XCPT_BP /* INT3 */
4495 || uVector == X86_XCPT_OF /* INTO */
4496 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4497 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4498 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4499 break;
4500 }
4501 }
4502 }
4503
4504 /* We're now done converting the pending event. */
4505 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4506}
4507
4508
4509/**
4510 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4511 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4512 *
4513 * @param pVCpu The cross context virtual CPU structure.
4514 * @param pVmcsInfo The VMCS info. object.
4515 */
4516static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4517{
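    /* An interrupt-window VM-exit occurs at an instruction boundary where RFLAGS.IF is 1 and
       there is no blocking by STI or by MOV SS; see Intel spec. 25.2 "Other Causes Of VM Exits". */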
4518 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4519 {
4520 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4521 {
4522 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4523 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4524 AssertRC(rc);
4525 }
4526 Log4Func(("Enabled interrupt-window exiting\n"));
4527 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4528}
4529
4530
4531/**
4532 * Clears the interrupt-window exiting control in the VMCS.
4533 *
4534 * @param pVCpu The cross context virtual CPU structure.
4535 * @param pVmcsInfo The VMCS info. object.
4536 */
4537DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4538{
4539 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4540 {
4541 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4542 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4543 AssertRC(rc);
4544 Log4Func(("Disabled interrupt-window exiting\n"));
4545 }
4546}
4547
4548
4549/**
4550 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4551 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4552 *
4553 * @param pVCpu The cross context virtual CPU structure.
4554 * @param pVmcsInfo The VMCS info. object.
4555 */
4556static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4557{
4558 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4559 {
4560 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4561 {
4562 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4563 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4564 AssertRC(rc);
4565 Log4Func(("Enabled NMI-window exiting\n"));
4566 }
4567 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4568}
4569
4570
4571/**
4572 * Clears the NMI-window exiting control in the VMCS.
4573 *
4574 * @param pVCpu The cross context virtual CPU structure.
4575 * @param pVmcsInfo The VMCS info. object.
4576 */
4577DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4578{
4579 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4580 {
4581 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4582 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4583 AssertRC(rc);
4584 Log4Func(("Disabled NMI-window exiting\n"));
4585 }
4586}
4587
4588
4589/**
4590 * Injects an event into the guest upon VM-entry by updating the relevant fields
4591 * in the VM-entry area in the VMCS.
4592 *
4593 * @returns Strict VBox status code (i.e. informational status codes too).
4594 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4595 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4596 *
4597 * @param pVCpu The cross context virtual CPU structure.
4598 * @param pVmcsInfo The VMCS info object.
4599 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4600 * @param pEvent The event being injected.
4601 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4602 * will be updated if necessary. This cannot be NULL.
4603 * @param fStepping Whether we're single-stepping guest execution and should
4604 * return VINF_EM_DBG_STEPPED if the event is injected
4605 * directly (registers modified by us, not by hardware on
4606 * VM-entry).
4607 */
4608static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4609 bool fStepping, uint32_t *pfIntrState)
4610{
4611 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4612 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4613 Assert(pfIntrState);
4614
4615#ifdef IN_NEM_DARWIN
4616 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4617#endif
4618
4619 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4620 uint32_t u32IntInfo = pEvent->u64IntInfo;
4621 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4622 uint32_t const cbInstr = pEvent->cbInstr;
4623 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4624 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4625 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4626
4627#ifdef VBOX_STRICT
4628 /*
4629 * Validate the error-code-valid bit for hardware exceptions.
4630 * No error codes for exceptions in real-mode.
4631 *
4632 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4633 */
4634 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4635 && !CPUMIsGuestInRealModeEx(pCtx))
4636 {
4637 switch (uVector)
4638 {
4639 case X86_XCPT_PF:
4640 case X86_XCPT_DF:
4641 case X86_XCPT_TS:
4642 case X86_XCPT_NP:
4643 case X86_XCPT_SS:
4644 case X86_XCPT_GP:
4645 case X86_XCPT_AC:
4646 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4647 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4648 RT_FALL_THRU();
4649 default:
4650 break;
4651 }
4652 }
4653
4654 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4655 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4656 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4657#endif
4658
4659 RT_NOREF(uVector);
4660 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4661 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4662 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4663 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4664 {
4665 Assert(uVector <= X86_XCPT_LAST);
4666 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4667 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4668 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4669 }
4670 else
4671 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4672
4673 /*
4674 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4675 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4676 * interrupt handler in the (real-mode) guest.
4677 *
4678 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4679 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4680 */
4681 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4682 {
4683#ifndef IN_NEM_DARWIN
4684 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4685#endif
4686 {
4687 /*
4688 * For CPUs with unrestricted guest execution enabled and with the guest
4689 * in real-mode, we must not set the deliver-error-code bit.
4690 *
4691 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4692 */
4693 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4694 }
4695#ifndef IN_NEM_DARWIN
4696 else
4697 {
4698 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4699 Assert(PDMVmmDevHeapIsEnabled(pVM));
4700 Assert(pVM->hm.s.vmx.pRealModeTSS);
4701 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4702
4703 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4704 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4705 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4706 AssertRCReturn(rc2, rc2);
4707
4708 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4709 size_t const cbIdtEntry = sizeof(X86IDTR16);
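            /* Each real-mode IVT entry is a 4-byte far pointer: a 16-bit offset followed by a
               16-bit segment selector, hence the "(4N - 1)" limit remark above. */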
4710 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4711 {
4712 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4713 if (uVector == X86_XCPT_DF)
4714 return VINF_EM_RESET;
4715
4716 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4717 No error codes for exceptions in real-mode. */
4718 if (uVector == X86_XCPT_GP)
4719 {
4720 static HMEVENT const s_EventXcptDf
4721 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4722 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4723 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4724 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4725 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4726 }
4727
4728 /*
4729 * If we're injecting an event with no valid IDT entry, inject a #GP.
4730 * No error codes for exceptions in real-mode.
4731 *
4732 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4733 */
4734 static HMEVENT const s_EventXcptGp
4735 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4736 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4737 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4738 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4739 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4740 }
4741
4742 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4743 uint16_t uGuestIp = pCtx->ip;
4744 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4745 {
4746 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4747 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4748 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4749 }
4750 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4751 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4752
4753 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4754 X86IDTR16 IdtEntry;
4755 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4756 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4757 AssertRCReturn(rc2, rc2);
4758
4759 /* Construct the stack frame for the interrupt/exception handler. */
4760 VBOXSTRICTRC rcStrict;
4761 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4762 if (rcStrict == VINF_SUCCESS)
4763 {
4764 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4765 if (rcStrict == VINF_SUCCESS)
4766 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4767 }
4768
4769 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4770 if (rcStrict == VINF_SUCCESS)
4771 {
4772 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4773 pCtx->rip = IdtEntry.offSel;
4774 pCtx->cs.Sel = IdtEntry.uSel;
4775 pCtx->cs.ValidSel = IdtEntry.uSel;
4776 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
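                /* Real-mode segment base is the selector shifted left by 4 (selector * 16);
                   cbIdtEntry happens to equal 4 here. */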
4777 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4778 && uVector == X86_XCPT_PF)
4779 pCtx->cr2 = GCPtrFault;
4780
4781 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4782 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4783 | HM_CHANGED_GUEST_RSP);
4784
4785 /*
4786 * If we delivered a hardware exception (other than an NMI) and if there was
4787 * block-by-STI in effect, we should clear it.
4788 */
4789 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4790 {
4791 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4792 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4793 Log4Func(("Clearing inhibition due to STI\n"));
4794 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4795 }
4796
4797 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4798 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4799
4800 /*
4801 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4802 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4803 */
4804 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4805
4806 /*
4807 * If we eventually support nested-guest execution without unrestricted guest execution,
4808 * we should set fInterceptEvents here.
4809 */
4810 Assert(!fIsNestedGuest);
4811
4812 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4813 if (fStepping)
4814 rcStrict = VINF_EM_DBG_STEPPED;
4815 }
4816 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4817 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4818 return rcStrict;
4819 }
4820#else
4821 RT_NOREF(pVmcsInfo);
4822#endif
4823 }
4824
4825 /*
4826 * Validate.
4827 */
4828 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4829 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4830
4831 /*
4832 * Inject the event into the VMCS.
4833 */
4834 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4835 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4836 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4837 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4838 AssertRC(rc);
4839
4840 /*
4841 * Update guest CR2 if this is a page-fault.
4842 */
4843 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4844 pCtx->cr2 = GCPtrFault;
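    /* Hardware does not load CR2 as part of VM-entry event injection, so we must do it here. */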
4845
4846 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4847 return VINF_SUCCESS;
4848}
4849
4850
4851/**
4852 * Evaluates the event to be delivered to the guest and sets it as the pending
4853 * event.
4854 *
4855 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4856 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4857 * NOT restore these force-flags.
4858 *
4859 * @returns Strict VBox status code (i.e. informational status codes too).
4860 * @param pVCpu The cross context virtual CPU structure.
4861 * @param pVmcsInfo The VMCS information structure.
4862 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4863 */
4864static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4865{
4866 Assert(pfIntrState);
4867 Assert(!TRPMHasTrap(pVCpu));
4868
4869 /*
4870 * Compute/update guest-interruptibility state related FFs.
4871 * The FFs will be used below while evaluating events to be injected.
4872 */
4873 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4874
4875 /*
4876 * Evaluate if a new event needs to be injected.
4877 * An event that's already pending has already performed all necessary checks.
4878 */
4879 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4880 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4881 {
4882 /** @todo SMI. SMIs take priority over NMIs. */
4883
4884 /*
4885 * NMIs.
4886 * NMIs take priority over external interrupts.
4887 */
4888 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4889 {
4890 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4891 {
4892 /* Finally, inject the NMI and we're done. */
4893 vmxHCSetPendingXcptNmi(pVCpu);
4894 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4895 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4896 return VINF_SUCCESS;
4897 }
4898
4899 /*
4900 * Setup NMI-window exiting and also clear any interrupt-window exiting that might
4901 * still be active. This can happen if we got VM-exits that were higher priority
4902 * than an interrupt-window VM-exit.
4903 */
4904 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4905 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4906 }
4907 else
4908 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4909
4910 /*
4911 * External interrupts (PIC/APIC).
4912 */
4913 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4914 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4915 {
4916 Assert(!DBGFIsStepping(pVCpu));
4917 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4918 AssertRC(rc);
4919
4920 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4921 {
4922 /*
4923 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4924 * We cannot re-request the interrupt from the controller again.
4925 */
4926 uint8_t u8Interrupt;
4927 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4928 if (RT_SUCCESS(rc))
4929 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4930 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4931 {
4932 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4933 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4934 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
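                    /* The TPR threshold is compared against the interrupt's priority class,
                       i.e. the upper 4 bits of the vector, hence the shift by 4. */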
4935 /*
4936 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4937 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4938 * need to re-set this force-flag here.
4939 */
4940 }
4941 else
4942 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4943
4944 /* We must clear interrupt-window exiting for the same reason mentioned above for NMIs. */
4945 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4946 return VINF_SUCCESS;
4947 }
4948
4949 /* Setup interrupt-window exiting. */
4950 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4951 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4952 }
4953 else
4954 {
4955 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4956 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4957 }
4958 }
4959 else
4960 {
4961 /*
4962 * An event is being injected or we are in an interrupt shadow.
4963 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4964 * soon as the guest is ready to accept it.
4965 */
4966 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4967 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4968 else
4969 {
4970 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4971 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4972 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4973 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4974 else
4975 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT));
4976 }
4977 }
4978
4979 return VINF_SUCCESS;
4980}
4981
4982
4983#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4984/**
4985 * Evaluates the event to be delivered to the nested-guest and sets it as the
4986 * pending event.
4987 *
4988 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4989 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4990 * NOT restore these force-flags.
4991 *
4992 * @returns Strict VBox status code (i.e. informational status codes too).
4993 * @param pVCpu The cross context virtual CPU structure.
4994 * @param pVmcsInfo The VMCS information structure.
4995 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4996 *
4997 * @remarks The guest must be in VMX non-root mode.
4998 */
4999static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
5000{
5001 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5002
5003 Assert(pfIntrState);
5004 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
5005 Assert(!TRPMHasTrap(pVCpu));
5006
5007 /*
5008 * Compute/update guest-interruptibility state related FFs.
5009 * The FFs will be used below while evaluating events to be injected.
5010 */
5011 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
5012
5013 /*
5014 * If we are injecting an event, we must not setup any interrupt/NMI-window
5015 * exiting or we would get into an infinite VM-exit loop. An event that's
5016 * already pending has already performed all necessary checks.
5017 */
5018 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5019 return VINF_SUCCESS;
5020
5021 /*
5022 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5023 * made pending (TRPM to HM event) and would be handled above if we resumed
5024 * execution in HM. If somehow we fell back to emulation after the
5025 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5026 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5027 * intercepts should be active and any events pending here have been generated
5028 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5029 */
5030 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5031
5032 /*
5033 * Interrupt shadows can also block NMIs. If we are in an interrupt shadow there's
5034 * nothing more to do here.
5035 *
5036 * See Intel spec. 24.4.2 "Guest Non-Register State".
5037 * See Intel spec. 25.4.1 "Event Blocking".
5038 */
5039 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
5040 { /* likely */ }
5041 else
5042 return VINF_SUCCESS;
5043
5044 /** @todo SMI. SMIs take priority over NMIs. */
5045
5046 /*
5047 * NMIs.
5048 * NMIs take priority over external interrupts.
5049 *
5050 * NMI blocking is in effect after delivering an NMI until the execution of IRET.
5051 * Only when there isn't any NMI blocking can an NMI-window VM-exit or delivery of an NMI happen.
5052 */
5053 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5054 {
5055 /*
5056 * Nested-guest NMI-window exiting.
5057 * The NMI-window exit must happen regardless of whether an NMI is pending
5058 * provided virtual-NMI blocking is not in effect.
5059 *
5060 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5061 */
5062 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5063 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5064 {
5065 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5066 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5067 }
5068
5069 /*
5070 * For a nested-guest, the FF always indicates the outer guest's ability to
5071 * receive an NMI while the guest-interruptibility state bit depends on whether
5072 * the nested-hypervisor is using virtual-NMIs.
5073 *
5074 * It is very important that we also clear the force-flag if we are causing
5075 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5076 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5077 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5078 */
5079 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5080 {
5081 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5082 return IEMExecVmxVmexitXcptNmi(pVCpu);
5083 vmxHCSetPendingXcptNmi(pVCpu);
5084 return VINF_SUCCESS;
5085 }
5086 }
5087
5088 /*
5089 * Nested-guest interrupt-window exiting.
5090 *
5091 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5092 * provided virtual interrupts are enabled.
5093 *
5094 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5095 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5096 */
5097 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5098 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5099 {
5100 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5101 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5102 }
5103
5104 /*
5105 * External interrupts (PIC/APIC).
5106 *
5107 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5108 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5109 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5110 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5111 *
5112 * See Intel spec. 25.4.1 "Event Blocking".
5113 */
5114 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5115 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
5116 && CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5117 {
5118 Assert(!DBGFIsStepping(pVCpu));
5119
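        /* If the nested-hypervisor intercepts external interrupts but does not use
           "acknowledge interrupt on exit", the VM-exit is delivered without querying the
           interrupt controller; the vector is only fetched in the ack-on-exit path below. */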
5120 /* Nested-guest external interrupt VM-exit. */
5121 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5122 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5123 {
5124 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5125 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5126 return rcStrict;
5127 }
5128
5129 /*
5130 * Fetch the external interrupt from the interrupt controller.
5131 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5132 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5133 */
5134 uint8_t u8Interrupt;
5135 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5136 if (RT_SUCCESS(rc))
5137 {
5138 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5139 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5140 {
5141 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5142 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5143 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5144 return rcStrict;
5145 }
5146 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5147 return VINF_SUCCESS;
5148 }
5149 }
5150 return VINF_SUCCESS;
5151}
5152#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5153
5154
5155/**
5156 * Injects any pending events into the guest if the guest is in a state to
5157 * receive them.
5158 *
5159 * @returns Strict VBox status code (i.e. informational status codes too).
5160 * @param pVCpu The cross context virtual CPU structure.
5161 * @param pVmcsInfo The VMCS information structure.
5162 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5163 * @param fIntrState The VT-x guest-interruptibility state.
5164 * @param fStepping Whether we are single-stepping the guest using the
5165 * hypervisor debugger and should return
5166 * VINF_EM_DBG_STEPPED if the event was dispatched
5167 * directly.
5168 */
5169static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5170 uint32_t fIntrState, bool fStepping)
5171{
5172 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5173#ifndef IN_NEM_DARWIN
5174 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5175#endif
5176
5177#ifdef VBOX_STRICT
5178 /*
5179 * Verify guest-interruptibility state.
5180 *
5181 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5182 * since injecting an event may modify the interruptibility state and we must thus always
5183 * use fIntrState.
5184 */
5185 {
5186 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5187 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5188 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5189 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5190 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5191 Assert(!TRPMHasTrap(pVCpu));
5192 NOREF(fBlockMovSS); NOREF(fBlockSti);
5193 }
5194#endif
5195
5196 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5197 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5198 {
5199 /*
5200 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5201 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5202 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5203 *
5204 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5205 */
5206 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5207#ifdef VBOX_STRICT
5208 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5209 {
5210 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5211 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5212 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5213 }
5214 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5215 {
5216 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5217 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5218 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5219 }
5220#endif
5221 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5222 uIntType));
5223
5224 /*
5225 * Inject the event and get any changes to the guest-interruptibility state.
5226 *
5227 * The guest-interruptibility state may need to be updated if we inject the event
5228 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5229 */
5230 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5231 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5232
5233 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5234 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5235 else
5236 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5237 }
5238
5239 /*
5240 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5241 * is an interrupt shadow (block-by-STI or block-by-MOV SS).
5242 */
5243 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5244 && !fIsNestedGuest)
5245 {
5246 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5247
5248 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5249 {
5250 /*
5251 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5252 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5253 */
5254 Assert(!DBGFIsStepping(pVCpu));
5255 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
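            /* BS (single-step) is bit 14 of the pending debug exceptions field. */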
5256 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5257 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5258 AssertRC(rc);
5259 }
5260 else
5261 {
5262 /*
5263 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5264 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5265 * we take care of this case in vmxHCExportSharedDebugState, and also of the case where
5266 * we use MTF, so just make sure it's called before executing guest-code.
5267 */
5268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5269 }
5270 }
5271 /* else: for nested-guests this is currently handled while merging the controls. */
5272
5273 /*
5274 * Finally, update the guest-interruptibility state.
5275 *
5276 * This is required for the real-on-v86 software interrupt injection, for
5277 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5278 */
5279 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5280 AssertRC(rc);
5281
5282 /*
5283 * There's no need to clear the VM-entry interruption-information field here if we're not
5284 * injecting anything. VT-x clears the valid bit on every VM-exit.
5285 *
5286 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5287 */
5288
5289 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5290 return rcStrict;
5291}
5292
5293
5294/**
5295 * Tries to determine what part of the guest-state VT-x has deemed invalid
5296 * and updates the error record fields accordingly.
5297 *
5298 * @returns VMX_IGS_* error codes.
5299 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5300 * wrong with the guest state.
5301 *
5302 * @param pVCpu The cross context virtual CPU structure.
5303 * @param pVmcsInfo The VMCS info. object.
5304 *
5305 * @remarks This function assumes our cache of the VMCS controls
5306 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5307 */
5308static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5309{
5310#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5311#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5312
5313 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5314 uint32_t uError = VMX_IGS_ERROR;
5315 uint32_t u32IntrState = 0;
5316#ifndef IN_NEM_DARWIN
5317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5318 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5319#else
5320 bool const fUnrestrictedGuest = true;
5321#endif
5322 do
5323 {
5324 int rc;
5325
5326 /*
5327 * Guest-interruptibility state.
5328 *
5329 * Read this first so that any check that fails prior to those that actually
5330 * require the guest-interruptibility state still reflects the correct
5331 * VMCS value and avoids causing further confusion.
5332 */
5333 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5334 AssertRC(rc);
5335
5336 uint32_t u32Val;
5337 uint64_t u64Val;
5338
5339 /*
5340 * CR0.
5341 */
5342 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5343 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5344 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
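        /* IA32_VMX_CR0_FIXED0 reports the CR0 bits that must be 1 while IA32_VMX_CR0_FIXED1
           reports the bits that are allowed to be 1; ANDing them yields the must-be-one mask
           and ORing them the may-be-one mask. */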
5345 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5346 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5347 if (fUnrestrictedGuest)
5348 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5349
5350 uint64_t u64GuestCr0;
5351 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5352 AssertRC(rc);
5353 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5354 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5355 if ( !fUnrestrictedGuest
5356 && (u64GuestCr0 & X86_CR0_PG)
5357 && !(u64GuestCr0 & X86_CR0_PE))
5358 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5359
5360 /*
5361 * CR4.
5362 */
5363 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5364 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5365 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5366
5367 uint64_t u64GuestCr4;
5368 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5369 AssertRC(rc);
5370 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5371 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5372
5373 /*
5374 * IA32_DEBUGCTL MSR.
5375 */
5376 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5377 AssertRC(rc);
5378 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5379 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5380 {
5381 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5382 }
5383 uint64_t u64DebugCtlMsr = u64Val;
5384
5385#ifdef VBOX_STRICT
5386 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5387 AssertRC(rc);
5388 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5389#endif
5390 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5391
5392 /*
5393 * RIP and RFLAGS.
5394 */
5395 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5396 AssertRC(rc);
5397 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5398 if ( !fLongModeGuest
5399 || !pCtx->cs.Attr.n.u1Long)
5400 {
5401 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5402 }
5403 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5404 * must be identical if the "IA-32e mode guest" VM-entry
5405 * control is 1 and CS.L is 1. No check applies if the
5406 * CPU supports 64 linear-address bits. */
5407
5408 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5409 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5410 AssertRC(rc);
5411 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5412 VMX_IGS_RFLAGS_RESERVED);
5413 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5414 uint32_t const u32Eflags = u64Val;
5415
5416 if ( fLongModeGuest
5417 || ( fUnrestrictedGuest
5418 && !(u64GuestCr0 & X86_CR0_PE)))
5419 {
5420 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5421 }
5422
5423 uint32_t u32EntryInfo;
5424 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5425 AssertRC(rc);
5426 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5427 {
5428 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5429 }
5430
5431 /*
5432 * 64-bit checks.
5433 */
5434 if (fLongModeGuest)
5435 {
5436 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5437 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5438 }
5439
5440 if ( !fLongModeGuest
5441 && (u64GuestCr4 & X86_CR4_PCIDE))
5442 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5443
5444 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5445 * 51:32 beyond the processor's physical-address width are 0. */
5446
5447 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5448 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5449 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5450
5451#ifndef IN_NEM_DARWIN
5452 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5453 AssertRC(rc);
5454 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5455
5456 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5457 AssertRC(rc);
5458 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5459#endif
5460
5461 /*
5462 * PERF_GLOBAL MSR.
5463 */
5464 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5465 {
5466 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5467 AssertRC(rc);
5468 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5469 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5470 }
5471
5472 /*
5473 * PAT MSR.
5474 */
5475 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5476 {
5477 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5478 AssertRC(rc);
5479 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5480 for (unsigned i = 0; i < 8; i++)
5481 {
5482 uint8_t u8Val = (u64Val & 0xff);
5483 if ( u8Val != 0 /* UC */
5484 && u8Val != 1 /* WC */
5485 && u8Val != 4 /* WT */
5486 && u8Val != 5 /* WP */
5487 && u8Val != 6 /* WB */
5488 && u8Val != 7 /* UC- */)
5489 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5490 u64Val >>= 8;
5491 }
5492 }
5493
5494 /*
5495 * EFER MSR.
5496 */
5497 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5498 {
5499 Assert(g_fHmVmxSupportsVmcsEfer);
5500 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5501 AssertRC(rc);
5502 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5503 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5504 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5505 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5506 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5507 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5508 * iemVmxVmentryCheckGuestState(). */
5509 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5510 || !(u64GuestCr0 & X86_CR0_PG)
5511 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5512 VMX_IGS_EFER_LMA_LME_MISMATCH);
5513 }
5514
5515 /*
5516 * Segment registers.
5517 */
5518 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5519 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5520 if (!(u32Eflags & X86_EFL_VM))
5521 {
5522 /* CS */
5523 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5524 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5525 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5526 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5527 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5528 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5529 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5530 /* CS cannot be loaded with NULL in protected mode. */
5531 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5532 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5533 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5534 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5535 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5536 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5537 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5538 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5539 else
5540 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5541
5542 /* SS */
5543 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5544 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5545 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5546 if ( !(pCtx->cr0 & X86_CR0_PE)
5547 || pCtx->cs.Attr.n.u4Type == 3)
5548 {
5549 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5550 }
5551
5552 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5553 {
5554 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5555 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5556 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5557 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5558 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5559 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5560 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5561 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5562 }
5563
5564 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5565 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5566 {
5567 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5568 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5569 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5570 || pCtx->ds.Attr.n.u4Type > 11
5571 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5572 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5573 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5574 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5575 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5576 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5577 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5578 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5579 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5580 }
5581 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5582 {
5583 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5584 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5585 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5586 || pCtx->es.Attr.n.u4Type > 11
5587 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5588 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5589 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5590 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5591 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5592 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5593 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5594 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5595 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5596 }
5597 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5598 {
5599 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5600 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5601 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5602 || pCtx->fs.Attr.n.u4Type > 11
5603 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5604 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5605 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5606 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5607 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5608 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5609 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5610 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5611 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5612 }
5613 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5614 {
5615 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5616 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5617 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5618 || pCtx->gs.Attr.n.u4Type > 11
5619 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5620 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5621 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5622 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5623 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5624 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5625 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5626 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5627 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5628 }
5629 /* 64-bit capable CPUs. */
5630 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5631 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5632 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5633 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5634 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5635 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5636 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5637 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5638 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5639 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5640 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5641 }
5642 else
5643 {
5644 /* V86 mode checks. */
5645 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5646 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5647 {
5648 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5649 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5650 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5651 }
5652 else
5653 {
5654 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5655 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5656 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5657 }
5658
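            /* In virtual-8086 mode each segment must have base = selector * 16, a 64K limit and
               attributes 0xf3 (present, DPL 3, accessed read/write data), as checked below. */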
5659 /* CS */
5660 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5661 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5662 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5663 /* SS */
5664 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5665 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5666 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5667 /* DS */
5668 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5669 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5670 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5671 /* ES */
5672 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5673 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5674 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5675 /* FS */
5676 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5677 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5678 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5679 /* GS */
5680 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5681 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5682 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5683 /* 64-bit capable CPUs. */
5684 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5685 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5686 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5687 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5688 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5689 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5690 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5691 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5692 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5693 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5694 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5695 }
5696
5697 /*
5698 * TR.
5699 */
5700 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5701 /* 64-bit capable CPUs. */
5702 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5703 if (fLongModeGuest)
5704 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5705 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5706 else
5707 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5708 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5709 VMX_IGS_TR_ATTR_TYPE_INVALID);
5710 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5711 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5712 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5713 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5714 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5715 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5716 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5717 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5718
5719 /*
5720 * GDTR and IDTR (64-bit capable checks).
5721 */
5722 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5723 AssertRC(rc);
5724 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5725
5726 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5727 AssertRC(rc);
5728 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5729
5730 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5731 AssertRC(rc);
5732 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5733
5734 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5735 AssertRC(rc);
5736 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5737
5738 /*
5739 * Guest Non-Register State.
5740 */
5741 /* Activity State. */
5742 uint32_t u32ActivityState;
5743 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5744 AssertRC(rc);
5745 HMVMX_CHECK_BREAK( !u32ActivityState
5746 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5747 VMX_IGS_ACTIVITY_STATE_INVALID);
5748 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5749 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5750
5751 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5752 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5753 {
5754 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5755 }
5756
5757 /** @todo Activity state and injecting interrupts. Left as a todo since we
5758 * currently don't use any activity state other than ACTIVE. */
5759
5760 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5761 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5762
5763 /* Guest interruptibility-state. */
5764 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5765 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5766 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5767 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5768 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5769 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5770 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5771 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5772 {
5773 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5774 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5775 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5776 }
5777 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5778 {
5779 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5780 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5781 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5782 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5783 }
5784 /** @todo Assumes the processor is not in SMM. */
5785 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5786 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5787 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5788 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5789 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5790 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5791 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5792 {
5793 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5794 }
5795
5796 /* Pending debug exceptions. */
5797 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5798 AssertRC(rc);
5799 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5800 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5801 u32Val = u64Val; /* For pending debug exceptions checks below. */
5802
5803 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5804 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5805 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5806 {
5807 if ( (u32Eflags & X86_EFL_TF)
5808 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5809 {
5810 /* Bit 14 is PendingDebug.BS. */
5811 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5812 }
5813 if ( !(u32Eflags & X86_EFL_TF)
5814 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5815 {
5816 /* Bit 14 is PendingDebug.BS. */
5817 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5818 }
5819 }
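 /* Worked example of the two checks above (informational): with RFLAGS.TF=1 and
 IA32_DEBUGCTL.BTF=0 the CPU single-steps every instruction, so BS (bit 14) in the
 pending debug-exceptions field must be set; with TF=0 or BTF=1 (branch-only
 single-stepping) no instruction-level single step is pending and BS must be clear. */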
5820
5821#ifndef IN_NEM_DARWIN
5822 /* VMCS link pointer. */
5823 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5824 AssertRC(rc);
5825 if (u64Val != UINT64_C(0xffffffffffffffff))
5826 {
5827 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5828 /** @todo Bits beyond the processor's physical-address width MBZ. */
5829 /** @todo SMM checks. */
5830 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5831 Assert(pVmcsInfo->pvShadowVmcs);
5832 VMXVMCSREVID VmcsRevId;
5833 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5834 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5835 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5836 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5837 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5838 }
5839
5840 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5841 * not using nested paging? */
5842 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5843 && !fLongModeGuest
5844 && CPUMIsGuestInPAEModeEx(pCtx))
5845 {
5846 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5847 AssertRC(rc);
5848 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5849
5850 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5851 AssertRC(rc);
5852 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5853
5854 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5855 AssertRC(rc);
5856 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5857
5858 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5859 AssertRC(rc);
5860 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5861 }
5862#endif
5863
5864 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5865 if (uError == VMX_IGS_ERROR)
5866 uError = VMX_IGS_REASON_NOT_FOUND;
5867 } while (0);
5868
5869 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5870 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5871 return uError;
5872
5873#undef HMVMX_ERROR_BREAK
5874#undef HMVMX_CHECK_BREAK
5875}
5876
5877
5878#ifndef HMVMX_USE_FUNCTION_TABLE
5879/**
5880 * Handles a guest VM-exit from hardware-assisted VMX execution.
5881 *
5882 * @returns Strict VBox status code (i.e. informational status codes too).
5883 * @param pVCpu The cross context virtual CPU structure.
5884 * @param pVmxTransient The VMX-transient structure.
5885 */
5886DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5887{
5888#ifdef DEBUG_ramshankar
5889# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5890 do { \
5891 if (a_fSave != 0) \
5892 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5893 VBOXSTRICTRC rcStrict = a_CallExpr; \
5894 if (a_fSave != 0) \
5895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5896 return rcStrict; \
5897 } while (0)
5898#else
5899# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5900#endif
5901 uint32_t const uExitReason = pVmxTransient->uExitReason;
5902 switch (uExitReason)
5903 {
5904 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5905 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5906 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5907 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5908 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5909 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5910 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5911 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5912 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5913 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5914 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5915 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5916 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5917 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5918 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5919 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5920 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5921 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5922 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5923 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5924 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5925 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5926 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5927 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5928 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5929 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5930 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5931 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5932 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5933 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5934#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5935 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5936 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5937 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5938 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5939 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5940 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5941 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5942 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5943 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5944 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5945#else
5946 case VMX_EXIT_VMCLEAR:
5947 case VMX_EXIT_VMLAUNCH:
5948 case VMX_EXIT_VMPTRLD:
5949 case VMX_EXIT_VMPTRST:
5950 case VMX_EXIT_VMREAD:
5951 case VMX_EXIT_VMRESUME:
5952 case VMX_EXIT_VMWRITE:
5953 case VMX_EXIT_VMXOFF:
5954 case VMX_EXIT_VMXON:
5955 case VMX_EXIT_INVVPID:
5956 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5957#endif
5958#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5959 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5960#else
5961 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5962#endif
5963
5964 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5965 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5966 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5967
5968 case VMX_EXIT_INIT_SIGNAL:
5969 case VMX_EXIT_SIPI:
5970 case VMX_EXIT_IO_SMI:
5971 case VMX_EXIT_SMI:
5972 case VMX_EXIT_ERR_MSR_LOAD:
5973 case VMX_EXIT_ERR_MACHINE_CHECK:
5974 case VMX_EXIT_PML_FULL:
5975 case VMX_EXIT_VIRTUALIZED_EOI:
5976 case VMX_EXIT_GDTR_IDTR_ACCESS:
5977 case VMX_EXIT_LDTR_TR_ACCESS:
5978 case VMX_EXIT_APIC_WRITE:
5979 case VMX_EXIT_RDRAND:
5980 case VMX_EXIT_RSM:
5981 case VMX_EXIT_VMFUNC:
5982 case VMX_EXIT_ENCLS:
5983 case VMX_EXIT_RDSEED:
5984 case VMX_EXIT_XSAVES:
5985 case VMX_EXIT_XRSTORS:
5986 case VMX_EXIT_UMWAIT:
5987 case VMX_EXIT_TPAUSE:
5988 case VMX_EXIT_LOADIWKEY:
5989 default:
5990 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5991 }
5992#undef VMEXIT_CALL_RET
5993}
5994#endif /* !HMVMX_USE_FUNCTION_TABLE */
5995
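#if 0 /* Informational sketch only, never compiled: illustrates the DEBUG_ramshankar variant of VMEXIT_CALL_RET. */
/*
 * With a non-zero a_fSave the macro brackets the handler call with a full guest-state
 * import and marks the whole context as changed afterwards. The handler used below
 * (vmxHCExitCpuid) is just one of the handlers dispatched in the switch above.
 */
static VBOXSTRICTRC vmxExampleDebugDispatchSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    VBOXSTRICTRC rcStrict = vmxHCExitCpuid(pVCpu, pVmxTransient);
    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    return rcStrict;
}
#endif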
5996
5997#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5998/**
5999 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
6000 *
6001 * @returns Strict VBox status code (i.e. informational status codes too).
6002 * @param pVCpu The cross context virtual CPU structure.
6003 * @param pVmxTransient The VMX-transient structure.
6004 */
6005DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6006{
6007#ifdef DEBUG_ramshankar
6008# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
6009 do { \
6010 if (a_fSave != 0) \
6011 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
6012 VBOXSTRICTRC rcStrict = a_CallExpr; \
6013 return rcStrict; \
6014 } while (0)
6015#else
6016# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6017#endif
6018
6019 uint32_t const uExitReason = pVmxTransient->uExitReason;
6020 switch (uExitReason)
6021 {
6022# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6023 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6024 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6025# else
6026 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6027 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6028# endif
6029 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6030 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6031 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6032
6033 /*
6034 * We shouldn't direct host physical interrupts to the nested-guest.
6035 */
6036 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6037
6038 /*
6039 * Instructions that cause VM-exits unconditionally or the condition is
6040 * always taken solely from the nested hypervisor (meaning if the VM-exit
6041 * happens, it's guaranteed to be a nested-guest VM-exit).
6042 *
6043 * - Provides VM-exit instruction length ONLY.
6044 */
6045 case VMX_EXIT_CPUID: /* Unconditional. */
6046 case VMX_EXIT_VMCALL:
6047 case VMX_EXIT_GETSEC:
6048 case VMX_EXIT_INVD:
6049 case VMX_EXIT_XSETBV:
6050 case VMX_EXIT_VMLAUNCH:
6051 case VMX_EXIT_VMRESUME:
6052 case VMX_EXIT_VMXOFF:
6053 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6054 case VMX_EXIT_VMFUNC:
6055 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6056
6057 /*
6058 * Instructions that cause VM-exits unconditionally or the condition is
6059 * always taken solely from the nested hypervisor (meaning if the VM-exit
6060 * happens, it's guaranteed to be a nested-guest VM-exit).
6061 *
6062 * - Provides VM-exit instruction length.
6063 * - Provides VM-exit information.
6064 * - Optionally provides Exit qualification.
6065 *
6066 * Since Exit qualification is 0 for all VM-exits where it is not
6067 * applicable, reading and passing it to the guest should produce
6068 * defined behavior.
6069 *
6070 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6071 */
6072 case VMX_EXIT_INVEPT: /* Unconditional. */
6073 case VMX_EXIT_INVVPID:
6074 case VMX_EXIT_VMCLEAR:
6075 case VMX_EXIT_VMPTRLD:
6076 case VMX_EXIT_VMPTRST:
6077 case VMX_EXIT_VMXON:
6078 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6079 case VMX_EXIT_LDTR_TR_ACCESS:
6080 case VMX_EXIT_RDRAND:
6081 case VMX_EXIT_RDSEED:
6082 case VMX_EXIT_XSAVES:
6083 case VMX_EXIT_XRSTORS:
6084 case VMX_EXIT_UMWAIT:
6085 case VMX_EXIT_TPAUSE:
6086 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6087
6088 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6089 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6090 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6091 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6092 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6093 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6094 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6095 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6096 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6097 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6098 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6099 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6100 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6101 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6102 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6103 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6104 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6105 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6106 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6107
6108 case VMX_EXIT_PREEMPT_TIMER:
6109 {
6110 /** @todo NSTVMX: Preempt timer. */
6111 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6112 }
6113
6114 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6115 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6116
6117 case VMX_EXIT_VMREAD:
6118 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6119
6120 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6121 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6122
6123 case VMX_EXIT_INIT_SIGNAL:
6124 case VMX_EXIT_SIPI:
6125 case VMX_EXIT_IO_SMI:
6126 case VMX_EXIT_SMI:
6127 case VMX_EXIT_ERR_MSR_LOAD:
6128 case VMX_EXIT_ERR_MACHINE_CHECK:
6129 case VMX_EXIT_PML_FULL:
6130 case VMX_EXIT_RSM:
6131 default:
6132 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6133 }
6134#undef VMEXIT_CALL_RET
6135}
6136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6137
6138
6139/** @name VM-exit helpers.
6140 * @{
6141 */
6142/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6143/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6144/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6145
6146/** Macro for VM-exits called unexpectedly. */
6147#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6148 do { \
6149 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6150 return VERR_VMX_UNEXPECTED_EXIT; \
6151 } while (0)
6152
6153#ifdef VBOX_STRICT
6154# ifndef IN_NEM_DARWIN
6155/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6156# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6157 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6158
6159# define HMVMX_ASSERT_PREEMPT_CPUID() \
6160 do { \
6161 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6162 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6163 } while (0)
6164
6165# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6166 do { \
6167 AssertPtr((a_pVCpu)); \
6168 AssertPtr((a_pVmxTransient)); \
6169 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6170 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6171 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6172 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6173 Assert((a_pVmxTransient)->pVmcsInfo); \
6174 Assert(ASMIntAreEnabled()); \
6175 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6176 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6177 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6178 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6179 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6180 HMVMX_ASSERT_PREEMPT_CPUID(); \
6181 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6182 } while (0)
6183# else
6184# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6185# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6186# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6187 do { \
6188 AssertPtr((a_pVCpu)); \
6189 AssertPtr((a_pVmxTransient)); \
6190 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6191 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6192 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6193 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6194 Assert((a_pVmxTransient)->pVmcsInfo); \
6195 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6196 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6197 } while (0)
6198# endif
6199
6200# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6201 do { \
6202 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6203 Assert((a_pVmxTransient)->fIsNestedGuest); \
6204 } while (0)
6205
6206# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6207 do { \
6208 Log4Func(("\n")); \
6209 } while (0)
6210#else
6211# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6212 do { \
6213 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6214 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6215 } while (0)
6216
6217# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6218 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6219
6220# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6221#endif
6222
6223#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6224/** Macro that performs the necessary privilege checks and pending-exception handling
6225 * for VM-exits caused by a guest attempting to execute a VMX instruction. */
6226# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6227 do \
6228 { \
6229 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6230 if (rcStrictTmp == VINF_SUCCESS) \
6231 { /* likely */ } \
6232 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6233 { \
6234 Assert((a_pVCpu)->hm.s.Event.fPending); \
6235 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6236 return VINF_SUCCESS; \
6237 } \
6238 else \
6239 { \
6240 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6241 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6242 } \
6243 } while (0)
6244
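# if 0 /* Hedged usage sketch only; the handler name below is hypothetical and not part of this file. */
/*
 * A VMX-instruction VM-exit handler would typically invoke the macro first; the macro
 * returns from the handler with VINF_SUCCESS when a #UD has been made pending by the
 * privilege checks, so only valid attempts reach the instruction-specific emulation.
 */
static VBOXSTRICTRC vmxExampleExitVmxInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    /* ...instruction-specific emulation (usually handed to IEM) would follow here... */
    return VINF_SUCCESS;
}
# endif
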
6245/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6246# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6247 do \
6248 { \
6249 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6250 (a_pGCPtrEffAddr)); \
6251 if (rcStrictTmp == VINF_SUCCESS) \
6252 { /* likely */ } \
6253 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6254 { \
6255 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6256 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6257 NOREF(uXcptTmp); \
6258 return VINF_SUCCESS; \
6259 } \
6260 else \
6261 { \
6262 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6263 return rcStrictTmp; \
6264 } \
6265 } while (0)
6266#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6267
6268
6269/**
6270 * Advances the guest RIP by the specified number of bytes.
6271 *
6272 * @param pVCpu The cross context virtual CPU structure.
6273 * @param cbInstr Number of bytes to advance the RIP by.
6274 *
6275 * @remarks No-long-jump zone!!!
6276 */
6277DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6278{
6279 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6280
6281 /*
6282 * Advance RIP.
6283 *
6284 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6285 * when the addition causes a "carry" into the upper half and check whether
6286 * we're in 64-bit mode and can go on with it or whether we should zap the top
6287 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6288 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6289 *
6290 * See PC wrap around tests in bs3-cpu-weird-1.
6291 */
6292 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6293 uint64_t const uRipNext = uRipPrev + cbInstr;
6294 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6295 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6296 pVCpu->cpum.GstCtx.rip = uRipNext;
6297 else
6298 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6299
6300 /*
6301 * Clear RF and interrupt shadowing.
6302 */
6303 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6304 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6305 else
6306 {
6307 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6308 {
6309 /** @todo \#DB - single step. */
6310 }
6311 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6312 }
6313 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6314
6315 /* Mark both RIP and RFLAGS as updated. */
6316 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6317}
6318
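#if 0 /* Informational sketch only, never compiled; uses a made-up RIP value, not real guest state. */
/*
 * Worked example of the "carry into the upper half" test used by vmxHCAdvanceGuestRipBy()
 * above, for a 32-bit guest whose RIP sits just below the 4G boundary.
 */
static void vmxExampleRipWrapSketch(void)
{
    uint64_t const uRipPrev = UINT64_C(0x00000000fffffffe); /* hypothetical 32-bit guest RIP */
    uint64_t const uRipNext = uRipPrev + 4;                 /* a 4-byte instruction carries into bit 32 */
    if ((uRipNext ^ uRipPrev) & RT_BIT_64(32))
    {
        /* Not in 64-bit mode: the result must be truncated to 32 bits (wraps to 0x2). */
        uint64_t const uRipWrapped = (uint32_t)uRipNext;
        NOREF(uRipWrapped);
    }
}
#endif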
6319
6320/**
6321 * Advances the guest RIP after reading it from the VMCS.
6322 *
6323 * @returns VBox status code, no informational status codes.
6324 * @param pVCpu The cross context virtual CPU structure.
6325 * @param pVmxTransient The VMX-transient structure.
6326 *
6327 * @remarks No-long-jump zone!!!
6328 */
6329static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6330{
6331 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6332 /** @todo consider template here after checking callers. */
6333 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6334 AssertRCReturn(rc, rc);
6335
6336 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6337 return VINF_SUCCESS;
6338}
6339
6340
6341/**
6342 * Handle a condition that occurred while delivering an event through the guest or
6343 * nested-guest IDT.
6344 *
6345 * @returns Strict VBox status code (i.e. informational status codes too).
6346 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6347 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6348 * to continue execution of the guest which will deliver the \#DF.
6349 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6350 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6351 *
6352 * @param pVCpu The cross context virtual CPU structure.
6353 * @param pVmxTransient The VMX-transient structure.
6354 *
6355 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6356 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6357 * is due to an EPT violation, PML full or SPP-related event.
6358 *
6359 * @remarks No-long-jump zone!!!
6360 */
6361static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6362{
6363 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6364 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6365 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6366 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6367 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6368 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6369
6370 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6371 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6372 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6373 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6374 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6375 {
6376 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6377 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6378
6379 /*
6380 * If the event was a software interrupt (generated with INT n) or a software exception
6381 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6382 * can handle the VM-exit and continue guest execution which will re-execute the
6383 * instruction rather than re-injecting the exception, as that can cause premature
6384 * trips to ring-3 before injection and involve TRPM which currently has no way of
6385 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6386 * the problem).
6387 */
6388 IEMXCPTRAISE enmRaise;
6389 IEMXCPTRAISEINFO fRaiseInfo;
6390 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6391 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6392 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6393 {
6394 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6395 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6396 }
6397 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6398 {
6399 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6400 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6401 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6402
6403 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6404 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6405
6406 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6407
6408 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6409 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6410 {
6411 pVmxTransient->fVectoringPF = true;
6412 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6413 }
6414 }
6415 else
6416 {
6417 /*
6418 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6419 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6420 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6421 */
6422 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6423 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6424 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6425 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6426 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6427 }
6428
6429 /*
6430 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6431 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6432 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6433 * subsequent VM-entry would fail, see @bugref{7445}.
6434 *
6435 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6436 */
6437 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6438 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6439 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6440 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6441 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6442
6443 switch (enmRaise)
6444 {
6445 case IEMXCPTRAISE_CURRENT_XCPT:
6446 {
6447 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6448 Assert(rcStrict == VINF_SUCCESS);
6449 break;
6450 }
6451
6452 case IEMXCPTRAISE_PREV_EVENT:
6453 {
6454 uint32_t u32ErrCode;
6455 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6456 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6457 else
6458 u32ErrCode = 0;
6459
6460 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6461 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6462 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6463 pVCpu->cpum.GstCtx.cr2);
6464
6465 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6466 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6467 Assert(rcStrict == VINF_SUCCESS);
6468 break;
6469 }
6470
6471 case IEMXCPTRAISE_REEXEC_INSTR:
6472 Assert(rcStrict == VINF_SUCCESS);
6473 break;
6474
6475 case IEMXCPTRAISE_DOUBLE_FAULT:
6476 {
6477 /*
6478 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6479 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6480 */
6481 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6482 {
6483 pVmxTransient->fVectoringDoublePF = true;
6484 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6485 pVCpu->cpum.GstCtx.cr2));
6486 rcStrict = VINF_SUCCESS;
6487 }
6488 else
6489 {
6490 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6491 vmxHCSetPendingXcptDF(pVCpu);
6492 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6493 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6494 rcStrict = VINF_HM_DOUBLE_FAULT;
6495 }
6496 break;
6497 }
6498
6499 case IEMXCPTRAISE_TRIPLE_FAULT:
6500 {
6501 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6502 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6503 rcStrict = VINF_EM_RESET;
6504 break;
6505 }
6506
6507 case IEMXCPTRAISE_CPU_HANG:
6508 {
6509 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6510 rcStrict = VERR_EM_GUEST_CPU_HANG;
6511 break;
6512 }
6513
6514 default:
6515 {
6516 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6517 rcStrict = VERR_VMX_IPE_2;
6518 break;
6519 }
6520 }
6521 }
6522 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6523 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6524 {
6525 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6526 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6527 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6528 {
6529 /*
6530 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6531 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6532 * that virtual NMIs remain blocked until the IRET execution is completed.
6533 *
6534 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6535 */
6536 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6537 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6538 }
6539 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6540 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6541 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6542 {
6543 /*
6544 * Execution of IRET caused an EPT violation, page-modification log-full event or
6545 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6546 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6547 * that virtual NMIs remain blocked until the IRET execution is completed.
6548 *
6549 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6550 */
6551 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6552 {
6553 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6554 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6555 }
6556 }
6557 }
6558
6559 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6560 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6561 return rcStrict;
6562}
6563
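#if 0 /* Hedged usage sketch only; the handler name below is hypothetical, the real callers are the exit handlers in this file. */
/*
 * Rough shape of how an exit handler consumes vmxHCCheckExitDueToEventDelivery(): on
 * VINF_SUCCESS it continues handling the VM-exit, anything else (VINF_HM_DOUBLE_FAULT,
 * VINF_EM_RESET, VERR_EM_GUEST_CPU_HANG) is propagated to the caller.
 */
static VBOXSTRICTRC vmxExampleUseEventDeliveryCheckSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ...normal handling of the VM-exit would continue here... */
    return VINF_SUCCESS;
}
#endif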
6564
6565#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6566/**
6567 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6568 * guest attempting to execute a VMX instruction.
6569 *
6570 * @returns Strict VBox status code (i.e. informational status codes too).
6571 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6572 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6573 *
6574 * @param pVCpu The cross context virtual CPU structure.
6575 * @param uExitReason The VM-exit reason.
6576 *
6577 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6578 * @remarks No-long-jump zone!!!
6579 */
6580static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6581{
6582 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6583 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6584
6585 /*
6586 * The physical CPU would have already checked the CPU mode/code segment.
6587 * We shall just assert here for paranoia.
6588 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6589 */
6590 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6591 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6592 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6593
6594 if (uExitReason == VMX_EXIT_VMXON)
6595 {
6596 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6597
6598 /*
6599 * We check CR4.VMXE because it is required to be always set while in VMX operation
6600 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6601 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6602 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6603 */
6604 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6605 {
6606 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6607 vmxHCSetPendingXcptUD(pVCpu);
6608 return VINF_HM_PENDING_XCPT;
6609 }
6610 }
6611 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6612 {
6613 /*
6614 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6615 * (other than VMXON), we need to raise a #UD.
6616 */
6617 Log4Func(("Not in VMX root mode -> #UD\n"));
6618 vmxHCSetPendingXcptUD(pVCpu);
6619 return VINF_HM_PENDING_XCPT;
6620 }
6621
6622 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6623 return VINF_SUCCESS;
6624}
6625
6626
6627/**
6628 * Decodes the memory operand of an instruction that caused a VM-exit.
6629 *
6630 * The Exit qualification field provides the displacement field for memory
6631 * operand instructions, if any.
6632 *
6633 * @returns Strict VBox status code (i.e. informational status codes too).
6634 * @retval VINF_SUCCESS if the operand was successfully decoded.
6635 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6636 * operand.
6637 * @param pVCpu The cross context virtual CPU structure.
6638 * @param uExitInstrInfo The VM-exit instruction information field.
6639 * @param enmMemAccess The memory operand's access type (read or write).
6640 * @param GCPtrDisp The instruction displacement field, if any. For
6641 * RIP-relative addressing pass RIP + displacement here.
6642 * @param pGCPtrMem Where to store the effective destination memory address.
6643 *
6644 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6645 * virtual-8086 mode hence skips those checks while verifying if the
6646 * segment is valid.
6647 */
6648static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6649 PRTGCPTR pGCPtrMem)
6650{
6651 Assert(pGCPtrMem);
6652 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6653 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6654 | CPUMCTX_EXTRN_CR0);
6655
6656 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6657 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6658 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6659
6660 VMXEXITINSTRINFO ExitInstrInfo;
6661 ExitInstrInfo.u = uExitInstrInfo;
6662 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6663 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6664 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6665 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6666 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6667 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6668 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6669 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6670 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6671
6672 /*
6673 * Validate instruction information.
6674 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6675 */
6676 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6677 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6678 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6679 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6680 AssertLogRelMsgReturn(fIsMemOperand,
6681 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6682
6683 /*
6684 * Compute the complete effective address.
6685 *
6686 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6687 * See AMD spec. 4.5.2 "Segment Registers".
6688 */
6689 RTGCPTR GCPtrMem = GCPtrDisp;
6690 if (fBaseRegValid)
6691 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6692 if (fIdxRegValid)
6693 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6694
6695 RTGCPTR const GCPtrOff = GCPtrMem;
6696 if ( !fIsLongMode
6697 || iSegReg >= X86_SREG_FS)
6698 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6699 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6700
6701 /*
6702 * Validate effective address.
6703 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6704 */
6705 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6706 Assert(cbAccess > 0);
6707 if (fIsLongMode)
6708 {
6709 if (X86_IS_CANONICAL(GCPtrMem))
6710 {
6711 *pGCPtrMem = GCPtrMem;
6712 return VINF_SUCCESS;
6713 }
6714
6715 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6716 * "Data Limit Checks in 64-bit Mode". */
6717 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6718 vmxHCSetPendingXcptGP(pVCpu, 0);
6719 return VINF_HM_PENDING_XCPT;
6720 }
6721
6722 /*
6723 * This is a watered down version of iemMemApplySegment().
6724 * Parts that are not applicable to VMX instructions, like real-or-v8086 mode
6725 * handling and segment CPL/DPL checks, are skipped.
6726 */
6727 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6728 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6729 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6730
6731 /* Check if the segment is present and usable. */
6732 if ( pSel->Attr.n.u1Present
6733 && !pSel->Attr.n.u1Unusable)
6734 {
6735 Assert(pSel->Attr.n.u1DescType);
6736 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6737 {
6738 /* Check permissions for the data segment. */
6739 if ( enmMemAccess == VMXMEMACCESS_WRITE
6740 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6741 {
6742 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6743 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6744 return VINF_HM_PENDING_XCPT;
6745 }
6746
6747 /* Check limits if it's a normal data segment. */
6748 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6749 {
6750 if ( GCPtrFirst32 > pSel->u32Limit
6751 || GCPtrLast32 > pSel->u32Limit)
6752 {
6753 Log4Func(("Data segment limit exceeded. "
6754 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6755 GCPtrLast32, pSel->u32Limit));
6756 if (iSegReg == X86_SREG_SS)
6757 vmxHCSetPendingXcptSS(pVCpu, 0);
6758 else
6759 vmxHCSetPendingXcptGP(pVCpu, 0);
6760 return VINF_HM_PENDING_XCPT;
6761 }
6762 }
6763 else
6764 {
6765 /* Check limits if it's an expand-down data segment.
6766 Note! The upper boundary is defined by the B bit, not the G bit! */
6767 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6768 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6769 {
6770 Log4Func(("Expand-down data segment limit exceeded. "
6771 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6772 GCPtrLast32, pSel->u32Limit));
6773 if (iSegReg == X86_SREG_SS)
6774 vmxHCSetPendingXcptSS(pVCpu, 0);
6775 else
6776 vmxHCSetPendingXcptGP(pVCpu, 0);
6777 return VINF_HM_PENDING_XCPT;
6778 }
6779 }
6780 }
6781 else
6782 {
6783 /* Check permissions for the code segment. */
6784 if ( enmMemAccess == VMXMEMACCESS_WRITE
6785 || ( enmMemAccess == VMXMEMACCESS_READ
6786 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6787 {
6788 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6789 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6790 vmxHCSetPendingXcptGP(pVCpu, 0);
6791 return VINF_HM_PENDING_XCPT;
6792 }
6793
6794 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6795 if ( GCPtrFirst32 > pSel->u32Limit
6796 || GCPtrLast32 > pSel->u32Limit)
6797 {
6798 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6799 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6800 if (iSegReg == X86_SREG_SS)
6801 vmxHCSetPendingXcptSS(pVCpu, 0);
6802 else
6803 vmxHCSetPendingXcptGP(pVCpu, 0);
6804 return VINF_HM_PENDING_XCPT;
6805 }
6806 }
6807 }
6808 else
6809 {
6810 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6811 vmxHCSetPendingXcptGP(pVCpu, 0);
6812 return VINF_HM_PENDING_XCPT;
6813 }
6814
6815 *pGCPtrMem = GCPtrMem;
6816 return VINF_SUCCESS;
6817}
6818#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6819
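#if 0 /* Informational sketch only, never compiled; all register values below are made up. */
/*
 * Worked example of the effective-address computation in vmxHCDecodeMemOperand() above:
 * displacement + base + (index << scale) + segment base, masked to the address size
 * (32-bit here).
 */
static RTGCPTR vmxExampleDecodeEffAddrSketch(void)
{
    RTGCPTR  const GCPtrDisp = 0x10;    /* displacement taken from the Exit qualification */
    uint64_t const uBase     = 0x1000;  /* hypothetical base register value (e.g. ebx) */
    uint64_t const uIndex    = 0x20;    /* hypothetical index register value (e.g. esi) */
    uint8_t  const uScale    = 2;       /* scale factor of 4 */
    uint64_t const uSegBase  = 0;       /* flat data segment */
    RTGCPTR GCPtrMem = GCPtrDisp + uBase + (uIndex << uScale) + uSegBase;
    GCPtrMem &= UINT64_C(0x00000000ffffffff); /* 32-bit address-size mask */
    return GCPtrMem;                          /* = 0x1090 */
}
#endif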
6820
6821/**
6822 * VM-exit helper for LMSW.
6823 */
6824static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6825{
6826 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6827 AssertRCReturn(rc, rc);
6828
6829 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6830 AssertMsg( rcStrict == VINF_SUCCESS
6831 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6832
6833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6834 if (rcStrict == VINF_IEM_RAISED_XCPT)
6835 {
6836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6837 rcStrict = VINF_SUCCESS;
6838 }
6839
6840 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6841 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6842 return rcStrict;
6843}
6844
6845
6846/**
6847 * VM-exit helper for CLTS.
6848 */
6849static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6850{
6851 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6852 AssertRCReturn(rc, rc);
6853
6854 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6855 AssertMsg( rcStrict == VINF_SUCCESS
6856 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6857
6858 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6859 if (rcStrict == VINF_IEM_RAISED_XCPT)
6860 {
6861 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6862 rcStrict = VINF_SUCCESS;
6863 }
6864
6865 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6866 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6867 return rcStrict;
6868}
6869
6870
6871/**
6872 * VM-exit helper for MOV from CRx (CRx read).
6873 */
6874static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6875{
6876 Assert(iCrReg < 16);
6877 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6878
6879 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6880 AssertRCReturn(rc, rc);
6881
6882 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6883 AssertMsg( rcStrict == VINF_SUCCESS
6884 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6885
6886 if (iGReg == X86_GREG_xSP)
6887 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6888 else
6889 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6890#ifdef VBOX_WITH_STATISTICS
6891 switch (iCrReg)
6892 {
6893 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6894 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6895 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6896 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6897 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6898 }
6899#endif
6900 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6901 return rcStrict;
6902}
6903
6904
6905/**
6906 * VM-exit helper for MOV to CRx (CRx write).
6907 */
6908static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6909{
6910 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6911
6912 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6913 AssertMsg( rcStrict == VINF_SUCCESS
6914 || rcStrict == VINF_IEM_RAISED_XCPT
6915 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6916
6917 switch (iCrReg)
6918 {
6919 case 0:
6920 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6921 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6922 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6923 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6924 break;
6925
6926 case 2:
6927 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6928 /* Nothing to do here; CR2 is not part of the VMCS. */
6929 break;
6930
6931 case 3:
6932 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6933 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6934 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6935 break;
6936
6937 case 4:
6938 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6939 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6940#ifndef IN_NEM_DARWIN
6941 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6942 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6943#else
6944 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6945#endif
6946 break;
6947
6948 case 8:
6949 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6950 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6951 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6952 break;
6953
6954 default:
6955 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6956 break;
6957 }
6958
6959 if (rcStrict == VINF_IEM_RAISED_XCPT)
6960 {
6961 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6962 rcStrict = VINF_SUCCESS;
6963 }
6964 return rcStrict;
6965}
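
/*
 * Note on the LMSW/CLTS/MOV-CRx helpers above (informational): they share one pattern,
 * i.e. make sure the guest state IEM needs is available, hand the pre-decoded instruction
 * to IEM, OR the affected HM_CHANGED_* bits into fCtxChanged, and fold VINF_IEM_RAISED_XCPT
 * into VINF_SUCCESS after flagging HM_CHANGED_RAISED_XCPT_MASK so the guest registers
 * touched while raising the exception are exported again before the next VM-entry.
 */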
6966
6967
6968/**
6969 * VM-exit exception handler for \#PF (Page-fault exception).
6970 *
6971 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6972 */
6973static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6974{
6975 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6976 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6977
6978#ifndef IN_NEM_DARWIN
6979 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6980 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6981 { /* likely */ }
6982 else
6983#endif
6984 {
6985#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6986 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6987#endif
6988 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6989 if (!pVmxTransient->fVectoringDoublePF)
6990 {
6991 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6992 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6993 }
6994 else
6995 {
6996 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6997 Assert(!pVmxTransient->fIsNestedGuest);
6998 vmxHCSetPendingXcptDF(pVCpu);
6999 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
7000 }
7001 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7002 return VINF_SUCCESS;
7003 }
7004
7005 Assert(!pVmxTransient->fIsNestedGuest);
7006
7007 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
7008 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
7009 if (pVmxTransient->fVectoringPF)
7010 {
7011 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7012 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7013 }
7014
7015 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7016 AssertRCReturn(rc, rc);
7017
7018 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
7019 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
7020
7021 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
7022 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
7023
7024 Log4Func(("#PF: rc=%Rrc\n", rc));
7025 if (rc == VINF_SUCCESS)
7026 {
7027 /*
7028 * This is typically a shadow page table sync or a MMIO instruction. But we may have
7029 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
7030 */
7031 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7032 TRPMResetTrap(pVCpu);
7033 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
7034 return rc;
7035 }
7036
7037 if (rc == VINF_EM_RAW_GUEST_TRAP)
7038 {
7039 if (!pVmxTransient->fVectoringDoublePF)
7040 {
7041 /* It's a guest page fault and needs to be reflected to the guest. */
7042 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
7043 TRPMResetTrap(pVCpu);
7044 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
7045 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7046 uGstErrorCode, pVmxTransient->uExitQual);
7047 }
7048 else
7049 {
7050 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7051 TRPMResetTrap(pVCpu);
7052 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
7053 vmxHCSetPendingXcptDF(pVCpu);
7054 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7055 }
7056
7057 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7058 return VINF_SUCCESS;
7059 }
7060
7061 TRPMResetTrap(pVCpu);
7062 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
7063 return rc;
7064}
7065
7066
7067/**
7068 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7069 *
7070 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7071 */
7072static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7073{
7074 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7075 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7076
7077 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7078 AssertRCReturn(rc, rc);
7079
7080 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7081 {
7082 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
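    /* Legacy behaviour: with CR0.NE clear the CPU reports x87 errors via the external FERR# pin
       instead of raising #MF directly, and PC chipsets route FERR# to IRQ 13, hence the ISA IRQ
       assertion below. */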
7083 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7084
7085 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7086 * provides VM-exit instruction length. If this causes problems later,
7087 * disassemble the instruction like it's done on AMD-V. */
7088 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7089 AssertRCReturn(rc2, rc2);
7090 return rc;
7091 }
7092
7093 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7094 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7095 return VINF_SUCCESS;
7096}
7097
7098
7099/**
7100 * VM-exit exception handler for \#BP (Breakpoint exception).
7101 *
7102 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7103 */
7104static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7105{
7106 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7107 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7108
7109 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7110 AssertRCReturn(rc, rc);
7111
7112 VBOXSTRICTRC rcStrict;
7113 if (!pVmxTransient->fIsNestedGuest)
7114 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7115 else
7116 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7117
7118 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7119 {
7120 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7121 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7122 rcStrict = VINF_SUCCESS;
7123 }
7124
7125 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7126 return rcStrict;
7127}
7128
7129
7130/**
7131 * VM-exit exception handler for \#AC (Alignment-check exception).
7132 *
7133 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7134 */
7135static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7136{
7137 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7138
7139 /*
7140 * Detect #ACs caused by the host having enabled split-lock detection.
7141 * Emulate such instructions.
7142 */
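    /* Background: when the host enables split-lock detection (MSR_TEST_CTRL.SPLIT_LOCK_DETECT on
       CPUs that support it), a LOCK-prefixed access crossing a cache-line boundary raises #AC(0)
       at any CPL, regardless of CR0.AM and EFLAGS.AC. That is what distinguishes it from a legacy
       486-style alignment check, which requires CPL 3, CR0.AM=1 and EFLAGS.AC=1. */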
7143#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7144 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7145 AssertRCReturn(rc, rc);
7146 /** @todo detect split lock in cpu feature? */
7147 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7148 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7149 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7150 || CPUMGetGuestCPL(pVCpu) != 3
7151 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7152 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7153 {
7154 /*
7155 * Check for debug/trace events and import state accordingly.
7156 */
7157 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7158 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7159 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7160#ifndef IN_NEM_DARWIN
7161 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7162#endif
7163 )
7164 {
7165 if (pVM->cCpus == 1)
7166 {
7167#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7168 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7169 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7170#else
7171 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7172 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7173#endif
7174 AssertRCReturn(rc, rc);
7175 }
7176 }
7177 else
7178 {
7179 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7180 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7181 AssertRCReturn(rc, rc);
7182
7183 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7184
7185 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7186 {
7187 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7188 if (rcStrict != VINF_SUCCESS)
7189 return rcStrict;
7190 }
7191 }
7192
7193 /*
7194 * Emulate the instruction.
7195 *
7196 * We have to ignore the LOCK prefix here as we must not retrigger the
7197 * detection on the host. This isn't all that satisfactory, though...
7198 */
7199 if (pVM->cCpus == 1)
7200 {
7201 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7202 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7203
7204 /** @todo For SMP configs we should do a rendezvous here. */
7205 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7206 if (rcStrict == VINF_SUCCESS)
7207#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7208 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7209 HM_CHANGED_GUEST_RIP
7210 | HM_CHANGED_GUEST_RFLAGS
7211 | HM_CHANGED_GUEST_GPRS_MASK
7212 | HM_CHANGED_GUEST_CS
7213 | HM_CHANGED_GUEST_SS);
7214#else
7215 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7216#endif
7217 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7218 {
7219 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7220 rcStrict = VINF_SUCCESS;
7221 }
7222 return rcStrict;
7223 }
7224 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7225 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7226 return VINF_EM_EMULATE_SPLIT_LOCK;
7227 }
7228
7229 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7230 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7231 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7232
7233 /* Re-inject it. We'll detect any nesting before getting here. */
7234 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7235 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7236 return VINF_SUCCESS;
7237}
7238
7239
7240/**
7241 * VM-exit exception handler for \#DB (Debug exception).
7242 *
7243 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7244 */
7245static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7246{
7247 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7248 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7249
7250 /*
7251 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
7252 */
7253 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7254
7255 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7256 uint64_t const uDR6 = X86_DR6_INIT_VAL
7257 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7258 | X86_DR6_BD | X86_DR6_BS));
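    /* Note that on a #DB VM-exit the CPU does not update guest DR6 itself; the pending B0-B3/BD/BS
       bits are only reported in the Exit qualification, which is why they are merged into DR6
       manually below when the exception is reflected to the guest. */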
7259 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7260
7261 int rc;
7262 if (!pVmxTransient->fIsNestedGuest)
7263 {
7264 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7265
7266 /*
7267 * Prevents stepping twice over the same instruction when the guest is stepping using
7268 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7269 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7270 */
7271 if ( rc == VINF_EM_DBG_STEPPED
7272 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7273 {
7274 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7275 rc = VINF_EM_RAW_GUEST_TRAP;
7276 }
7277 }
7278 else
7279 rc = VINF_EM_RAW_GUEST_TRAP;
7280 Log6Func(("rc=%Rrc\n", rc));
7281 if (rc == VINF_EM_RAW_GUEST_TRAP)
7282 {
7283 /*
7284 * The exception was for the guest. Update DR6, DR7.GD and
7285 * IA32_DEBUGCTL.LBR before forwarding it.
7286 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7287 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7288 */
7289#ifndef IN_NEM_DARWIN
7290 VMMRZCallRing3Disable(pVCpu);
7291 HM_DISABLE_PREEMPT(pVCpu);
7292
7293 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7294 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7295 if (CPUMIsGuestDebugStateActive(pVCpu))
7296 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7297
7298 HM_RESTORE_PREEMPT();
7299 VMMRZCallRing3Enable(pVCpu);
7300#else
7301 /** @todo */
7302#endif
7303
7304 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7305 AssertRCReturn(rc, rc);
7306
7307 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7308 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7309
7310 /* Paranoia. */
7311 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7312 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7313
7314 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7315 AssertRC(rc);
7316
7317 /*
7318 * Raise #DB in the guest.
7319 *
7320 * It is important to reflect exactly what the VM-exit gave us (preserving the
7321 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7322 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7323 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7324 *
7325 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of the
7326 * Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7327 */
7328 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7329 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7330 return VINF_SUCCESS;
7331 }
7332
7333 /*
7334 * Not a guest trap; it must be a hypervisor-related debug event then.
7335 * Update DR6 in case someone is interested in it.
7336 */
7337 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7338 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7339 CPUMSetHyperDR6(pVCpu, uDR6);
7340
7341 return rc;
7342}
7343
7344
7345/**
7346 * Hacks its way around the lovely mesa driver's backdoor accesses.
7347 *
7348 * @sa hmR0SvmHandleMesaDrvGp.
7349 */
7350static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7351{
7352 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7353 RT_NOREF(pCtx);
7354
7355 /* For now we'll just skip the instruction. */
7356 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7357}
7358
7359
7360/**
7361 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7362 * backdoor logging without checking what it is running inside.
7363 *
7364 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7365 * backdoor port and magic numbers loaded in registers.
7366 *
7367 * @returns true if it is, false if it isn't.
7368 * @sa hmR0SvmIsMesaDrvGp.
7369 */
7370DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7371{
7372 /* 0xed: IN eAX,dx */
7373 uint8_t abInstr[1];
7374 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7375 return false;
7376
7377 /* Check that it is #GP(0). */
7378 if (pVmxTransient->uExitIntErrorCode != 0)
7379 return false;
7380
7381 /* Check magic and port. */
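    /* These are the VMware backdoor interface constants: 0x564d5868 is the ASCII magic 'VMXh'
       expected in EAX and 0x5658 ('VX') is the backdoor I/O port expected in DX. */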
7382 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7383 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7384 if (pCtx->rax != UINT32_C(0x564d5868))
7385 return false;
7386 if (pCtx->dx != UINT32_C(0x5658))
7387 return false;
7388
7389 /* Flat ring-3 CS. */
7390 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7391 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7392 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7393 if (pCtx->cs.Attr.n.u2Dpl != 3)
7394 return false;
7395 if (pCtx->cs.u64Base != 0)
7396 return false;
7397
7398 /* Check opcode. */
7399 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7400 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7401 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7402 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7403 if (RT_FAILURE(rc))
7404 return false;
7405 if (abInstr[0] != 0xed)
7406 return false;
7407
7408 return true;
7409}
7410
7411
7412/**
7413 * VM-exit exception handler for \#GP (General-protection exception).
7414 *
7415 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7416 */
7417static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7418{
7419 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7420 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7421
7422 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7423 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7424#ifndef IN_NEM_DARWIN
7425 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7426 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7427 { /* likely */ }
7428 else
7429#endif
7430 {
7431#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7432# ifndef IN_NEM_DARWIN
7433 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7434# else
7435 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7436# endif
7437#endif
7438 /*
7439 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7440 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7441 */
7442 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7443 AssertRCReturn(rc, rc);
7444 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7445 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7446
7447 if ( pVmxTransient->fIsNestedGuest
7448 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7449 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7450 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7451 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7452 else
7453 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7454 return rc;
7455 }
7456
7457#ifndef IN_NEM_DARWIN
7458 Assert(CPUMIsGuestInRealModeEx(pCtx));
7459 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7460 Assert(!pVmxTransient->fIsNestedGuest);
7461
7462 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7463 AssertRCReturn(rc, rc);
7464
7465 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7466 if (rcStrict == VINF_SUCCESS)
7467 {
7468 if (!CPUMIsGuestInRealModeEx(pCtx))
7469 {
7470 /*
7471 * The guest is no longer in real-mode, check if we can continue executing the
7472 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7473 */
7474 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7475 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7476 {
7477 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7478 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7479 }
7480 else
7481 {
7482 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7483 rcStrict = VINF_EM_RESCHEDULE;
7484 }
7485 }
7486 else
7487 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7488 }
7489 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7490 {
7491 rcStrict = VINF_SUCCESS;
7492 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7493 }
7494 return VBOXSTRICTRC_VAL(rcStrict);
7495#endif
7496}
7497
7498
7499/**
7500 * VM-exit exception handler for \#DE (Divide Error).
7501 *
7502 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7503 */
7504static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7505{
7506 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7507 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7508
7509 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7510 AssertRCReturn(rc, rc);
7511
7512 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
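    /* GCM (Guest Compatibility Manager) can fix up certain well-known #DE cases in legacy guests,
       such as divide overflows in timing loops on fast CPUs; GCMXcptDE below decides whether this
       particular fault qualifies. */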
7513 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7514 {
7515 uint8_t cbInstr = 0;
7516 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7517 if (rc2 == VINF_SUCCESS)
7518 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7519 else if (rc2 == VERR_NOT_FOUND)
7520 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7521 else
7522 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7523 }
7524 else
7525 rcStrict = VINF_SUCCESS; /* Do nothing. */
7526
7527 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7528 if (RT_FAILURE(rcStrict))
7529 {
7530 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7531 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7532 rcStrict = VINF_SUCCESS;
7533 }
7534
7535 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7536 return VBOXSTRICTRC_VAL(rcStrict);
7537}
7538
7539
7540/**
7541 * VM-exit exception handler wrapper for all other exceptions that are not handled
7542 * by a specific handler.
7543 *
7544 * This simply re-injects the exception back into the VM without any special
7545 * processing.
7546 *
7547 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7548 */
7549static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7550{
7551 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7552
7553#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7554# ifndef IN_NEM_DARWIN
7555 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7556 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7557 ("uVector=%#x u32XcptBitmap=%#X32\n",
7558 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7559 NOREF(pVmcsInfo);
7560# endif
7561#endif
7562
7563 /*
7564 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7565 * would have been handled while checking exits due to event delivery.
7566 */
7567 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7568
7569#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7570 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7571 AssertRCReturn(rc, rc);
7572 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7573#endif
7574
7575#ifdef VBOX_WITH_STATISTICS
7576 switch (uVector)
7577 {
7578 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7579 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7580 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7581 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7582 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7583 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7584 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7585 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7586 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7587 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7588 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7589 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7590 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7591 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7592 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7593 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7594 default:
7595 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7596 break;
7597 }
7598#endif
7599
7600 /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
7601 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7602 NOREF(uVector);
7603
7604 /* Re-inject the original exception into the guest. */
7605 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7606 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7607 return VINF_SUCCESS;
7608}
7609
7610
7611/**
7612 * VM-exit exception handler for all exceptions (except NMIs!).
7613 *
7614 * @remarks This may be called for both guests and nested-guests. Take care to not
7615 * make assumptions and avoid doing anything that is not relevant when
7616 * executing a nested-guest (e.g., Mesa driver hacks).
7617 */
7618static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7619{
7620 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7621
7622 /*
7623 * If this VM-exit occurred while delivering an event through the guest IDT, take
7624 * action based on the return code and additional hints (e.g. for page-faults)
7625 * that will be updated in the VMX transient structure.
7626 */
7627 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7628 if (rcStrict == VINF_SUCCESS)
7629 {
7630 /*
7631 * If an exception caused a VM-exit due to delivery of an event, the original
7632 * event may have to be re-injected into the guest. We shall reinject it and
7633 * continue guest execution. However, page-fault is a complicated case and
7634 * needs additional processing done in vmxHCExitXcptPF().
7635 */
7636 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7637 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7638 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7639 || uVector == X86_XCPT_PF)
7640 {
7641 switch (uVector)
7642 {
7643 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7644 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7645 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7646 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7647 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7648 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7649 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7650 default:
7651 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7652 }
7653 }
7654 /* else: inject pending event before resuming guest execution. */
7655 }
7656 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7657 {
7658 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7659 rcStrict = VINF_SUCCESS;
7660 }
7661
7662 return rcStrict;
7663}
7664/** @} */
7665
7666
7667/** @name VM-exit handlers.
7668 * @{
7669 */
7670/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7671/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7672/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7673
7674/**
7675 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7676 */
7677HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7678{
7679 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7680 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7681
7682#ifndef IN_NEM_DARWIN
7683 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
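    /* The interrupt itself is serviced by the host once interrupts are re-enabled in ring-0, so
       with thread-context hooks active it should be safe to resume the guest directly instead of
       taking the costlier VINF_EM_RAW_INTERRUPT round trip to ring-3. */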
7684 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7685 return VINF_SUCCESS;
7686 return VINF_EM_RAW_INTERRUPT;
7687#else
7688 return VINF_SUCCESS;
7689#endif
7690}
7691
7692
7693/**
7694 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7695 * VM-exit.
7696 */
7697HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7698{
7699 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7700 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7701
7702 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7703
7704 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7705 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7706 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7707
7708 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7709 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7710 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7711 NOREF(pVmcsInfo);
7712
7713 VBOXSTRICTRC rcStrict;
7714 switch (uExitIntType)
7715 {
7716#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7717 /*
7718 * Host physical NMIs:
7719 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7720 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7721 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7722 *
7723 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7724 * See Intel spec. 27.5.5 "Updating Non-Register State".
7725 */
7726 case VMX_EXIT_INT_INFO_TYPE_NMI:
7727 {
7728 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7729 break;
7730 }
7731#endif
7732
7733 /*
7734 * Privileged software exceptions (#DB from ICEBP),
7735 * Software exceptions (#BP and #OF),
7736 * Hardware exceptions:
7737 * Process the required exceptions and resume guest execution if possible.
7738 */
7739 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7740 Assert(uVector == X86_XCPT_DB);
7741 RT_FALL_THRU();
7742 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7743 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7744 RT_FALL_THRU();
7745 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7746 {
7747 NOREF(uVector);
7748 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7749 | HMVMX_READ_EXIT_INSTR_LEN
7750 | HMVMX_READ_IDT_VECTORING_INFO
7751 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7752 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7753 break;
7754 }
7755
7756 default:
7757 {
7758 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7759 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7760 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7761 break;
7762 }
7763 }
7764
7765 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7766 return rcStrict;
7767}
7768
7769
7770/**
7771 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7772 */
7773HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7774{
7775 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7776
7777 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7778 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7779 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7780
7781 /* Evaluate and deliver pending events and resume guest execution. */
7782 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7783 return VINF_SUCCESS;
7784}
7785
7786
7787/**
7788 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7789 */
7790HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7791{
7792 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7793
7794 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7795 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7796 {
7797 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7798 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7799 }
7800
7801 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7802
7803 /*
7804 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7805 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7806 */
7807 uint32_t fIntrState;
7808 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7809 AssertRC(rc);
7810 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7811 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7812 {
7813 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7814
7815 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7816 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7817 AssertRC(rc);
7818 }
7819
7820 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7821 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7822
7823 /* Evaluate and deliver pending events and resume guest execution. */
7824 return VINF_SUCCESS;
7825}
7826
7827
7828/**
7829 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7830 */
7831HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7832{
7833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7834 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7835}
7836
7837
7838/**
7839 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7840 */
7841HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7842{
7843 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7844 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7845}
7846
7847
7848/**
7849 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7850 */
7851HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7852{
7853 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7854
7855 /*
7856 * Get the state we need and update the exit history entry.
7857 */
7858 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7859 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7860 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7861 AssertRCReturn(rc, rc);
7862
7863 VBOXSTRICTRC rcStrict;
7864 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7865 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7866 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7867 if (!pExitRec)
7868 {
7869 /*
7870 * Regular CPUID instruction execution.
7871 */
7872 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7873 if (rcStrict == VINF_SUCCESS)
7874 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7875 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7876 {
7877 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7878 rcStrict = VINF_SUCCESS;
7879 }
7880 }
7881 else
7882 {
7883 /*
7884 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7885 */
7886 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7887 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7888 AssertRCReturn(rc2, rc2);
7889
7890 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7891 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7892
7893 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7894 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7895
7896 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7897 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7898 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7899 }
7900 return rcStrict;
7901}
7902
7903
7904/**
7905 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7906 */
7907HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7908{
7909 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7910
7911 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7912 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7913 AssertRCReturn(rc, rc);
7914
7915 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7916 return VINF_EM_RAW_EMULATE_INSTR;
7917
7918 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7919 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7920}
7921
7922
7923/**
7924 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7925 */
7926HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7927{
7928 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7929
7930 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7931 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7932 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7933 AssertRCReturn(rc, rc);
7934
7935 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7936 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7937 {
7938 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7939 we must reset offsetting on VM-entry. See @bugref{6634}. */
7940 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7941 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7942 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7943 }
7944 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7945 {
7946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7947 rcStrict = VINF_SUCCESS;
7948 }
7949 return rcStrict;
7950}
7951
7952
7953/**
7954 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7955 */
7956HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7957{
7958 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7959
7960 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7961 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7962 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7963 AssertRCReturn(rc, rc);
7964
7965 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7966 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7967 {
7968 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7969 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7970 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7971 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7973 }
7974 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7975 {
7976 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7977 rcStrict = VINF_SUCCESS;
7978 }
7979 return rcStrict;
7980}
7981
7982
7983/**
7984 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7985 */
7986HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7987{
7988 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7989
7990 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7991 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7992 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7993 AssertRCReturn(rc, rc);
7994
7995 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7996 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7997 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7998 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7999 {
8000 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8001 rcStrict = VINF_SUCCESS;
8002 }
8003 return rcStrict;
8004}
8005
8006
8007/**
8008 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
8009 */
8010HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8011{
8012 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8013
8014 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
8015 if (EMAreHypercallInstructionsEnabled(pVCpu))
8016 {
8017 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8018 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8019 | CPUMCTX_EXTRN_RFLAGS
8020 | CPUMCTX_EXTRN_CR0
8021 | CPUMCTX_EXTRN_SS
8022 | CPUMCTX_EXTRN_CS
8023 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8024 AssertRCReturn(rc, rc);
8025
8026 /* Perform the hypercall. */
8027 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8028 if (rcStrict == VINF_SUCCESS)
8029 {
8030 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8031 AssertRCReturn(rc, rc);
8032 }
8033 else
8034 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8035 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8036 || RT_FAILURE(rcStrict));
8037
8038 /* If the hypercall changes anything other than guest's general-purpose registers,
8039 we would need to reload the guest changed bits here before VM-entry. */
8040 }
8041 else
8042 Log4Func(("Hypercalls not enabled\n"));
8043
8044 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8045 if (RT_FAILURE(rcStrict))
8046 {
8047 vmxHCSetPendingXcptUD(pVCpu);
8048 rcStrict = VINF_SUCCESS;
8049 }
8050
8051 return rcStrict;
8052}
8053
8054
8055/**
8056 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8057 */
8058HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8059{
8060 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8061#ifndef IN_NEM_DARWIN
8062 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8063#endif
8064
8065 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8066 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8067 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8068 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8069 AssertRCReturn(rc, rc);
8070
8071 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8072
8073 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8074 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8075 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8076 {
8077 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8078 rcStrict = VINF_SUCCESS;
8079 }
8080 else
8081 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8082 VBOXSTRICTRC_VAL(rcStrict)));
8083 return rcStrict;
8084}
8085
8086
8087/**
8088 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8089 */
8090HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8091{
8092 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8093
8094 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8095 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8096 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8097 AssertRCReturn(rc, rc);
8098
8099 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8100 if (rcStrict == VINF_SUCCESS)
8101 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8102 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8103 {
8104 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8105 rcStrict = VINF_SUCCESS;
8106 }
8107
8108 return rcStrict;
8109}
8110
8111
8112/**
8113 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8114 */
8115HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8116{
8117 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8118
8119 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8120 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8121 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8122 AssertRCReturn(rc, rc);
8123
8124 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8125 if (RT_SUCCESS(rcStrict))
8126 {
8127 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8128 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8129 rcStrict = VINF_SUCCESS;
8130 }
8131
8132 return rcStrict;
8133}
8134
8135
8136/**
8137 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8138 * VM-exit.
8139 */
8140HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8141{
8142 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8143 return VINF_EM_RESET;
8144}
8145
8146
8147/**
8148 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8149 */
8150HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8151{
8152 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8153
8154 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8155 AssertRCReturn(rc, rc);
8156
8157 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8158 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8159 rc = VINF_SUCCESS;
8160 else
8161 rc = VINF_EM_HALT;
8162
8163 if (rc != VINF_SUCCESS)
8164 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8165 return rc;
8166}
8167
8168
8169#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8170/**
8171 * VM-exit handler for instructions that result in a \#UD exception delivered to
8172 * the guest.
8173 */
8174HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8175{
8176 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8177 vmxHCSetPendingXcptUD(pVCpu);
8178 return VINF_SUCCESS;
8179}
8180#endif
8181
8182
8183/**
8184 * VM-exit handler for expiry of the VMX-preemption timer.
8185 */
8186HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8187{
8188 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8189
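    /* The VMX-preemption timer counts down during guest execution at a rate derived from the TSC
       and forces this VM-exit when it reaches zero, giving us a periodic opportunity to poll the
       timer queues below without relying on host interrupts. */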
8190 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8191 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8192 Log12(("vmxHCExitPreemptTimer:\n"));
8193
8194 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8195 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8196 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8197 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8198 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8199}
8200
8201
8202/**
8203 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8204 */
8205HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8206{
8207 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8208
8209 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8210 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8211 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8212 AssertRCReturn(rc, rc);
8213
8214 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8215 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8216 : HM_CHANGED_RAISED_XCPT_MASK);
8217
8218#ifndef IN_NEM_DARWIN
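    /* After XSETBV the guest XCR0 may differ from the host's; if it does (and the guest has
       OSXSAVE enabled), the VM-entry path must swap XCR0 around guest execution, so re-select
       the start-VM function whenever that requirement changes. */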
8219 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8220 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8221 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8222 {
8223 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8224 hmR0VmxUpdateStartVmFunction(pVCpu);
8225 }
8226#endif
8227
8228 return rcStrict;
8229}
8230
8231
8232/**
8233 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8234 */
8235HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8236{
8237 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8238
8239 /** @todo Enable the new code after finding a reliable guest test-case. */
8240#if 1
8241 return VERR_EM_INTERPRETER;
8242#else
8243 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8244 | HMVMX_READ_EXIT_INSTR_INFO
8245 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8246 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8247 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8248 AssertRCReturn(rc, rc);
8249
8250 /* Paranoia. Ensure this has a memory operand. */
8251 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8252
8253 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8254 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8255 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8256 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8257
8258 RTGCPTR GCPtrDesc;
8259 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8260
8261 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8262 GCPtrDesc, uType);
8263 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8264 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8265 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8266 {
8267 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8268 rcStrict = VINF_SUCCESS;
8269 }
8270 return rcStrict;
8271#endif
8272}
8273
8274
8275/**
8276 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8277 * VM-exit.
8278 */
8279HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8280{
8281 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8282 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8283 AssertRCReturn(rc, rc);
8284
8285 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8286 if (RT_FAILURE(rc))
8287 return rc;
8288
8289 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8290 NOREF(uInvalidReason);
8291
8292#ifdef VBOX_STRICT
8293 uint32_t fIntrState;
8294 uint64_t u64Val;
8295 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8296 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8297 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8298
8299 Log4(("uInvalidReason %u\n", uInvalidReason));
8300 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8301 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8302 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8303
8304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8305 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8306 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8307 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8308 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8309 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8310 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8311 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8312 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8313 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8314 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8315 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8316# ifndef IN_NEM_DARWIN
8317 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8318 {
8319 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8320 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8321 }
8322
8323 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8324# endif
8325#endif
8326
8327 return VERR_VMX_INVALID_GUEST_STATE;
8328}
8329
8330/**
8331 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8332 */
8333HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8334{
8335 /*
8336 * Cumulative notes of all recognized but unexpected VM-exits.
8337 *
8338 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8339 * nested-paging is used.
8340 *
8341 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8342 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8343 * this function (and thereby stopping VM execution) for handling such instructions.
8344 *
8345 *
8346 * VMX_EXIT_INIT_SIGNAL:
8347 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8348 * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
8349 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8350 *
8351 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8352 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8353 * See Intel spec. 23.8 "Restrictions on VMX Operation".
8354 *
8355 * VMX_EXIT_SIPI:
8356 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8357 * activity state is used. We don't make use of it as our guests don't have direct
8358 * access to the host local APIC.
8359 *
8360 * See Intel spec. 25.3 "Other Causes of VM-exits".
8361 *
8362 * VMX_EXIT_IO_SMI:
8363 * VMX_EXIT_SMI:
8364 * This can only happen if we support dual-monitor treatment of SMI, which can be
8365 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8366 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8367 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8368 *
8369 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8370 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8371 *
8372 * VMX_EXIT_ERR_MSR_LOAD:
8373 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8374 * and typically indicate a bug in the hypervisor code. We thus cannot resume
8375 * execution.
8376 *
8377 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8378 *
8379 * VMX_EXIT_ERR_MACHINE_CHECK:
8380 * A machine-check exception indicates a fatal/unrecoverable hardware condition,
8381 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8382 * abort-class #MC exception is raised. We thus cannot assume a
8383 * reasonable chance of continuing any sort of execution and we bail.
8384 *
8385 * See Intel spec. 15.1 "Machine-check Architecture".
8386 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8387 *
8388 * VMX_EXIT_PML_FULL:
8389 * VMX_EXIT_VIRTUALIZED_EOI:
8390 * VMX_EXIT_APIC_WRITE:
8391 * We do not currently support any of these features and thus they are all unexpected
8392 * VM-exits.
8393 *
8394 * VMX_EXIT_GDTR_IDTR_ACCESS:
8395 * VMX_EXIT_LDTR_TR_ACCESS:
8396 * VMX_EXIT_RDRAND:
8397 * VMX_EXIT_RSM:
8398 * VMX_EXIT_VMFUNC:
8399 * VMX_EXIT_ENCLS:
8400 * VMX_EXIT_RDSEED:
8401 * VMX_EXIT_XSAVES:
8402 * VMX_EXIT_XRSTORS:
8403 * VMX_EXIT_UMWAIT:
8404 * VMX_EXIT_TPAUSE:
8405 * VMX_EXIT_LOADIWKEY:
8406 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8407 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8408 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8409 *
8410 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8411 */
8412 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8413 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8414 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8415}
8416
8417
8418/**
8419 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8420 */
8421HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8422{
8423 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8424
8425 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8426
8427 /** @todo Optimize this: We currently drag in the whole MSR state
8428 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8429 * MSRs required. That would require changes to IEM and possibly CPUM too.
8430 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8431 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8432 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8433 int rc;
8434 switch (idMsr)
8435 {
8436 default:
8437 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8438 __FUNCTION__);
8439 AssertRCReturn(rc, rc);
8440 break;
8441 case MSR_K8_FS_BASE:
8442 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8443 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8444 AssertRCReturn(rc, rc);
8445 break;
8446 case MSR_K8_GS_BASE:
8447 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8448 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8449 AssertRCReturn(rc, rc);
8450 break;
8451 }
8452
8453 Log4Func(("ecx=%#RX32\n", idMsr));
8454
8455#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8456 Assert(!pVmxTransient->fIsNestedGuest);
8457 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8458 {
8459 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8460 && idMsr != MSR_K6_EFER)
8461 {
8462 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8463 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8464 }
8465 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8466 {
8467 Assert(pVmcsInfo->pvMsrBitmap);
8468 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8469 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8470 {
8471 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8472 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8473 }
8474 }
8475 }
8476#endif
8477
8478 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8479 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8480 if (rcStrict == VINF_SUCCESS)
8481 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8482 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8483 {
8484 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8485 rcStrict = VINF_SUCCESS;
8486 }
8487 else
8488 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8489 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8490
8491 return rcStrict;
8492}
8493
8494
8495/**
8496 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8497 */
8498HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8499{
8500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8501
8502 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8503
8504 /*
8505 * The FS and GS base MSRs are not covered by the CPUMCTX_EXTRN_ALL_MSRS mask used below.
8506 * Although we don't need to fetch the base itself (it will be overwritten shortly), when
8507 * loading the guest state we would also load the entire segment register, including the
8508 * limit and attributes, and thus we need to import them here.
8509 */
8510 /** @todo Optimize this: We currently drag in the whole MSR state
8511 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
8512 * MSRs that are actually required. That would require changes to IEM and possibly CPUM too.
8513 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8514 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8515 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8516 int rc;
8517 switch (idMsr)
8518 {
8519 default:
8520 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8521 __FUNCTION__);
8522 AssertRCReturn(rc, rc);
8523 break;
8524
8525 case MSR_K8_FS_BASE:
8526 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8527 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8528 AssertRCReturn(rc, rc);
8529 break;
8530 case MSR_K8_GS_BASE:
8531 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8532 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8533 AssertRCReturn(rc, rc);
8534 break;
8535 }
8536 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8537
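    /* Hand the decoded WRMSR to IEM; on success the code below marks the affected parts of the
       guest state (APIC TPR, TSC offsetting, EFER and entry/exit controls, VMCS-resident and
       auto-load MSRs) as dirty so they get re-exported to the VMCS before the next VM-entry. */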
8538 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8539 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8540
8541 if (rcStrict == VINF_SUCCESS)
8542 {
8543 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8544
8545 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8546 if ( idMsr == MSR_IA32_APICBASE
8547 || ( idMsr >= MSR_IA32_X2APIC_START
8548 && idMsr <= MSR_IA32_X2APIC_END))
8549 {
8550 /*
8551 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8552 * When full APIC register virtualization is implemented we'll have to make
8553 * sure APIC state is saved from the VMCS before IEM changes it.
8554 */
8555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8556 }
8557 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8558 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8559 else if (idMsr == MSR_K6_EFER)
8560 {
8561 /*
8562 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8563 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8564 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8565 */
8566 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8567 }
8568
8569 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8570 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8571 {
8572 switch (idMsr)
8573 {
8574 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8575 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8576 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8577 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8578 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8579 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8580 default:
8581 {
8582#ifndef IN_NEM_DARWIN
8583 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8584 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8585 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8586 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8587#else
8588 AssertMsgFailed(("TODO\n"));
8589#endif
8590 break;
8591 }
8592 }
8593 }
8594#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8595 else
8596 {
8597 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8598 switch (idMsr)
8599 {
8600 case MSR_IA32_SYSENTER_CS:
8601 case MSR_IA32_SYSENTER_EIP:
8602 case MSR_IA32_SYSENTER_ESP:
8603 case MSR_K8_FS_BASE:
8604 case MSR_K8_GS_BASE:
8605 {
8606 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8607 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8608 }
8609
8610 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8611 default:
8612 {
8613 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8614 {
8615 /* EFER MSR writes are always intercepted. */
8616 if (idMsr != MSR_K6_EFER)
8617 {
8618 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8619 idMsr));
8620 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8621 }
8622 }
8623
8624 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8625 {
8626 Assert(pVmcsInfo->pvMsrBitmap);
8627 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8628 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8629 {
8630 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8631 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8632 }
8633 }
8634 break;
8635 }
8636 }
8637 }
8638#endif /* VBOX_STRICT */
8639 }
8640 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8641 {
8642 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8643 rcStrict = VINF_SUCCESS;
8644 }
8645 else
8646 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8647 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8648
8649 return rcStrict;
8650}
8651
8652
8653/**
8654 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8655 */
8656HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8657{
8658 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8659
8660 /** @todo The guest has likely hit a contended spinlock. We might want to
8661 * poke or schedule a different guest VCPU. */
8662 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8663 if (RT_SUCCESS(rc))
8664 return VINF_EM_RAW_INTERRUPT;
8665
8666 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8667 return rc;
8668}
8669
8670
8671/**
8672 * VM-exit handler for when the TPR value is lowered below the specified
8673 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8674 */
8675HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8676{
8677 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8678 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8679
8680 /*
8681 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8682 * We'll re-evaluate pending interrupts and inject them before the next VM
8683 * entry so we can just continue execution here.
8684 */
8685 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8686 return VINF_SUCCESS;
8687}
8688
8689
8690/**
8691 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8692 * VM-exit.
8693 *
8694 * @retval VINF_SUCCESS when guest execution can continue.
8695 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8696 * @retval VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8697 * incompatible guest state for VMX execution (real-on-v86 case).
8698 */
8699HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8700{
8701 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8702 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8703
8704 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8705 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8706 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8707
8708 VBOXSTRICTRC rcStrict;
8709 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8710 uint64_t const uExitQual = pVmxTransient->uExitQual;
8711 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
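    /* Rough layout of the exit qualification for control-register accesses (see Intel spec.
       "Exit Qualification for Control-Register Accesses"):
         bits 3:0   - control register number (CR0/CR3/CR4/CR8),
         bits 5:4   - access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW),
         bit  6     - LMSW operand type (0 = register, 1 = memory),
         bits 11:8  - general-purpose register for MOV CRx,
         bits 31:16 - LMSW source data.
       The VMX_EXIT_QUAL_CRX_* macros used below extract these fields; e.g. a guest
       "mov cr0, rax" would give access type 0, iCrReg 0 and iGReg X86_GREG_xAX. */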
8712 switch (uAccessType)
8713 {
8714 /*
8715 * MOV to CRx.
8716 */
8717 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8718 {
8719 /*
8720 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8721 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8722 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8723 * PAE PDPTEs as well.
8724 */
8725 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8726 AssertRCReturn(rc, rc);
8727
8728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8729#ifndef IN_NEM_DARWIN
8730 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8731#endif
8732 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8733 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8734
8735 /*
8736 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8737 * - When nested paging isn't used.
8738 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8739 * - We are executing in the VM debug loop.
8740 */
8741#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8742# ifndef IN_NEM_DARWIN
8743 Assert( iCrReg != 3
8744 || !VM_IS_VMX_NESTED_PAGING(pVM)
8745 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8746 || pVCpu->hmr0.s.fUsingDebugLoop);
8747# else
8748 Assert( iCrReg != 3
8749 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8750# endif
8751#endif
8752
8753 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8754 Assert( iCrReg != 8
8755 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8756
8757 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8758 AssertMsg( rcStrict == VINF_SUCCESS
8759 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8760
8761#ifndef IN_NEM_DARWIN
8762 /*
8763 * This is a kludge for handling switches back to real mode when we try to use
8764 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8765 * deal with special selector values, so we have to return to ring-3 and run
8766 * there till the selector values are V86 mode compatible.
8767 *
8768 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8769 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8770 * this function.
8771 */
8772 if ( iCrReg == 0
8773 && rcStrict == VINF_SUCCESS
8774 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8775 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8776 && (uOldCr0 & X86_CR0_PE)
8777 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8778 {
8779 /** @todo Check selectors rather than returning all the time. */
8780 Assert(!pVmxTransient->fIsNestedGuest);
8781 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8782 rcStrict = VINF_EM_RESCHEDULE_REM;
8783 }
8784#endif
8785
8786 break;
8787 }
8788
8789 /*
8790 * MOV from CRx.
8791 */
8792 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8793 {
8794 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8795 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8796
8797 /*
8798 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8799 * - When nested paging isn't used.
8800 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8801 * - We are executing in the VM debug loop.
8802 */
8803#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8804# ifndef IN_NEM_DARWIN
8805 Assert( iCrReg != 3
8806 || !VM_IS_VMX_NESTED_PAGING(pVM)
8807 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8808 || pVCpu->hmr0.s.fLeaveDone);
8809# else
8810 Assert( iCrReg != 3
8811 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8812# endif
8813#endif
8814
8815 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8816 Assert( iCrReg != 8
8817 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8818
8819 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8820 break;
8821 }
8822
8823 /*
8824 * CLTS (Clear Task-Switch Flag in CR0).
8825 */
8826 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8827 {
8828 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8829 break;
8830 }
8831
8832 /*
8833 * LMSW (Load Machine-Status Word into CR0).
8834 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8835 */
8836 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8837 {
8838 RTGCPTR GCPtrEffDst;
8839 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8840 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8841 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8842 if (fMemOperand)
8843 {
8844 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8845 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8846 }
8847 else
8848 GCPtrEffDst = NIL_RTGCPTR;
8849 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8850 break;
8851 }
8852
8853 default:
8854 {
8855 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8856 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8857 }
8858 }
8859
8860 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8861 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8862 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8863
8864 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8865 NOREF(pVM);
8866 return rcStrict;
8867}
8868
8869
8870/**
8871 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8872 * VM-exit.
8873 */
8874HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8875{
8876 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8877 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8878
8879 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8880 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8881 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8882 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8883#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8884 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8885 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8886 AssertRCReturn(rc, rc);
8887
8888 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8889 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8890 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8891 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8892 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8893 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8894 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8895 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8896
8897 /*
8898 * Update exit history to see if this exit can be optimized.
8899 */
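    /* The exit-history optimization (EMHistoryExec) is only attempted when neither the guest
       (TF set) nor the debugger is single-stepping, so that stepping still executes exactly
       one instruction at a time. */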
8900 VBOXSTRICTRC rcStrict;
8901 PCEMEXITREC pExitRec = NULL;
8902 if ( !fGstStepping
8903 && !fDbgStepping)
8904 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8905 !fIOString
8906 ? !fIOWrite
8907 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8908 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8909 : !fIOWrite
8910 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8911 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8912 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8913 if (!pExitRec)
8914 {
8915 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8916 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
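    /* The size-of-access field in the exit qualification encodes 0 = 1 byte, 1 = 2 bytes and
       3 = 4 bytes (2 is not used), hence the zero entries at index 2 in the tables above.
       For example, uIOSize == 3 yields cbValue = 4 and an AND mask of 0xffffffff (EAX). */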
8917
8918 uint32_t const cbValue = s_aIOSizes[uIOSize];
8919 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8920 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8921 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8922 if (fIOString)
8923 {
8924 /*
8925 * INS/OUTS - I/O String instruction.
8926 *
8927 * Use instruction-information if available, otherwise fall back on
8928 * interpreting the instruction.
8929 */
8930 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8931 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8932 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8933 if (fInsOutsInfo)
8934 {
8935 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8936 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8937 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8938 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8939 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8940 if (fIOWrite)
8941 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8942 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8943 else
8944 {
8945 /*
8946 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8947 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8948 * See Intel Instruction spec. for "INS".
8949 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8950 */
8951 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8952 }
8953 }
8954 else
8955 rcStrict = IEMExecOne(pVCpu);
8956
8957 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8958 fUpdateRipAlready = true;
8959 }
8960 else
8961 {
8962 /*
8963 * IN/OUT - I/O instruction.
8964 */
8965 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8966 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8967 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8968 if (fIOWrite)
8969 {
8970 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8971 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8972#ifndef IN_NEM_DARWIN
8973 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8974 && !pCtx->eflags.Bits.u1TF)
8975 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8976#endif
8977 }
8978 else
8979 {
8980 uint32_t u32Result = 0;
8981 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8982 if (IOM_SUCCESS(rcStrict))
8983 {
8984 /* Save result of I/O IN instr. in AL/AX/EAX. */
8985 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8986 }
8987#ifndef IN_NEM_DARWIN
8988 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8989 && !pCtx->eflags.Bits.u1TF)
8990 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8991#endif
8992 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8993 }
8994 }
8995
8996 if (IOM_SUCCESS(rcStrict))
8997 {
8998 if (!fUpdateRipAlready)
8999 {
9000 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
9001 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
9002 }
9003
9004 /*
9005 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
9006 * while booting a Fedora 17 64-bit guest.
9007 *
9008 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
9009 */
9010 if (fIOString)
9011 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
9012
9013 /*
9014 * If any I/O breakpoints are armed, we need to check if one triggered
9015 * and take appropriate action.
9016 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9017 */
9018#if 1
9019 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9020#else
9021 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9022 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9023 AssertRCReturn(rc, rc);
9024#endif
9025
9026 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9027 * execution engines about whether hyper BPs and such are pending. */
9028 uint32_t const uDr7 = pCtx->dr[7];
9029 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9030 && X86_DR7_ANY_RW_IO(uDr7)
9031 && (pCtx->cr4 & X86_CR4_DE))
9032 || DBGFBpIsHwIoArmed(pVM)))
9033 {
9034 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9035
9036#ifndef IN_NEM_DARWIN
9037 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9038 VMMRZCallRing3Disable(pVCpu);
9039 HM_DISABLE_PREEMPT(pVCpu);
9040
9041 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9042
9043 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9044 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9045 {
9046 /* Raise #DB. */
9047 if (fIsGuestDbgActive)
9048 ASMSetDR6(pCtx->dr[6]);
9049 if (pCtx->dr[7] != uDr7)
9050 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9051
9052 vmxHCSetPendingXcptDB(pVCpu);
9053 }
9054 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
9055 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9056 else if ( rcStrict2 != VINF_SUCCESS
9057 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9058 rcStrict = rcStrict2;
9059 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9060
9061 HM_RESTORE_PREEMPT();
9062 VMMRZCallRing3Enable(pVCpu);
9063#else
9064 /** @todo */
9065#endif
9066 }
9067 }
9068
9069#ifdef VBOX_STRICT
9070 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9071 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9072 Assert(!fIOWrite);
9073 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9074 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9075 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9076 Assert(fIOWrite);
9077 else
9078 {
9079# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9080 * statuses, that the VMM device and some others may return. See
9081 * IOM_SUCCESS() for guidance. */
9082 AssertMsg( RT_FAILURE(rcStrict)
9083 || rcStrict == VINF_SUCCESS
9084 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9085 || rcStrict == VINF_EM_DBG_BREAKPOINT
9086 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9087 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9088# endif
9089 }
9090#endif
9091 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9092 }
9093 else
9094 {
9095 /*
9096 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9097 */
9098 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9099 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9100 AssertRCReturn(rc2, rc2);
9101 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9102 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9103 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9104 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9105 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9106 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9107
9108 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9110
9111 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9112 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9113 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9114 }
9115 return rcStrict;
9116}
9117
9118
9119/**
9120 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9121 * VM-exit.
9122 */
9123HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9124{
9125 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9126
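    /* Task-switch VM-exits are not emulated here: if the task switch was triggered by an event
       delivered through the guest IDT, the pending event is recorded below and
       VINF_EM_RAW_INJECT_TRPM_EVENT is returned so it can be re-injected; everything else is
       left to the interpreter. */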
9127 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9128 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9129 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9130 {
9131 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9132 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9133 {
9134 uint32_t uErrCode;
9135 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9136 {
9137 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9138 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9139 }
9140 else
9141 uErrCode = 0;
9142
9143 RTGCUINTPTR GCPtrFaultAddress;
9144 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9145 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9146 else
9147 GCPtrFaultAddress = 0;
9148
9149 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9150
9151 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9152 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9153
9154 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9155 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9156 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9157 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9158 }
9159 }
9160
9161 /* Fall back to the interpreter to emulate the task-switch. */
9162 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9163 return VERR_EM_INTERPRETER;
9164}
9165
9166
9167/**
9168 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9169 */
9170HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9171{
9172 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9173
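    /* The monitor-trap-flag causes a VM-exit on the next guest instruction boundary; clear the
       control again and report this as a completed debugger single-step. */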
9174 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9175 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9176 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9177 AssertRC(rc);
9178 return VINF_EM_DBG_STEPPED;
9179}
9180
9181
9182/**
9183 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9184 */
9185HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9186{
9187 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9188 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9189
9190 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9191 | HMVMX_READ_EXIT_INSTR_LEN
9192 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9193 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9194 | HMVMX_READ_IDT_VECTORING_INFO
9195 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9196
9197 /*
9198 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9199 */
9200 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9201 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9202 {
9203 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9204 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9205 {
9206 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9207 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9208 }
9209 }
9210 else
9211 {
9212 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9213 return rcStrict;
9214 }
9215
9216 /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9217 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9218 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9219 AssertRCReturn(rc, rc);
9220
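    /* For linear accesses to the APIC-access page, the exit qualification provides the page
       offset of the access; the code below reconstructs the guest-physical address from the
       guest's (page-aligned) APIC base plus that offset and forwards it to the MMIO handler. */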
9221 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9222 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9223 switch (uAccessType)
9224 {
9225#ifndef IN_NEM_DARWIN
9226 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9227 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9228 {
9229 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9230 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9231 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9232
9233 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9234 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9235 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9236 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9237 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9238
9239 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9240 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9241 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9242 if ( rcStrict == VINF_SUCCESS
9243 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9244 || rcStrict == VERR_PAGE_NOT_PRESENT)
9245 {
9246 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9247 | HM_CHANGED_GUEST_APIC_TPR);
9248 rcStrict = VINF_SUCCESS;
9249 }
9250 break;
9251 }
9252#else
9253 /** @todo */
9254#endif
9255
9256 default:
9257 {
9258 Log4Func(("uAccessType=%#x\n", uAccessType));
9259 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9260 break;
9261 }
9262 }
9263
9264 if (rcStrict != VINF_SUCCESS)
9265 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9266 return rcStrict;
9267}
9268
9269
9270/**
9271 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9272 * VM-exit.
9273 */
9274HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9275{
9276 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9277 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9278
9279 /*
9280 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9281 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9282 * must emulate the MOV DRx access.
9283 */
9284 if (!pVmxTransient->fIsNestedGuest)
9285 {
9286 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9287 if ( pVmxTransient->fWasGuestDebugStateActive
9288#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9289 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9290#endif
9291 )
9292 {
9293 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9294 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9295 }
9296
9297 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9298 && !pVmxTransient->fWasHyperDebugStateActive)
9299 {
9300 Assert(!DBGFIsStepping(pVCpu));
9301 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9302
9303 /* Whether we disable intercepting MOV DRx instructions and resume
9304 the current one, or emulate it and keep intercepting them, is
9305 configurable. It usually comes down to whether there are any new
9306 DR6 & DR7 bits (RTM) we want to hide from the guest. */
9307#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9308 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9309#else
9310 bool const fResumeInstruction = true;
9311#endif
9312 if (fResumeInstruction)
9313 {
9314 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9315 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9316 AssertRC(rc);
9317 }
9318
9319#ifndef IN_NEM_DARWIN
9320 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9321 VMMRZCallRing3Disable(pVCpu);
9322 HM_DISABLE_PREEMPT(pVCpu);
9323
9324 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9325 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9326 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9327
9328 HM_RESTORE_PREEMPT();
9329 VMMRZCallRing3Enable(pVCpu);
9330#else
9331 CPUMR3NemActivateGuestDebugState(pVCpu);
9332 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9333 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9334#endif
9335
9336 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9337 if (fResumeInstruction)
9338 {
9339#ifdef VBOX_WITH_STATISTICS
9340 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9341 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9342 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9343 else
9344 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9345#endif
9346 return VINF_SUCCESS;
9347 }
9348 }
9349 }
9350
9351 /*
9352 * Import state. We must have DR7 loaded here as it's always consulted,
9353 * both for reading and writing. The other debug registers are never
9354 * exported as such.
9355 */
9356 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9357 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9358 | CPUMCTX_EXTRN_GPRS_MASK
9359 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9360 AssertRCReturn(rc, rc);
9361
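    /* Rough layout of the exit qualification for MOV DRx (see Intel spec. "Exit Qualification
       for Debug-Register Accesses"): bits 2:0 hold the debug register number, bit 4 the
       direction (0 = MOV to DRx, 1 = MOV from DRx) and bits 11:8 the general-purpose register;
       the VMX_EXIT_QUAL_DRX_* macros below extract these. */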
9362 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9363 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9364 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9365 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9366
9367 VBOXSTRICTRC rcStrict;
9368 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9369 {
9370 /*
9371 * Write DRx register.
9372 */
9373 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9374 AssertMsg( rcStrict == VINF_SUCCESS
9375 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9376
9377 if (rcStrict == VINF_SUCCESS)
9378 {
9379 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9380 * kept it for now to avoid breaking something non-obvious. */
9381 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9382 | HM_CHANGED_GUEST_DR7);
9383 /* Update the DR6 register if guest debug state is active, otherwise we'll
9384 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9385 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9386 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9387 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9388 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9389 }
9390 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9391 {
9392 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9393 rcStrict = VINF_SUCCESS;
9394 }
9395
9396 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9397 }
9398 else
9399 {
9400 /*
9401 * Read DRx register into a general purpose register.
9402 */
9403 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9404 AssertMsg( rcStrict == VINF_SUCCESS
9405 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9406
9407 if (rcStrict == VINF_SUCCESS)
9408 {
9409 if (iGReg == X86_GREG_xSP)
9410 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9411 | HM_CHANGED_GUEST_RSP);
9412 else
9413 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9414 }
9415 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9416 {
9417 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9418 rcStrict = VINF_SUCCESS;
9419 }
9420
9421 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9422 }
9423
9424 return rcStrict;
9425}
9426
9427
9428/**
9429 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9430 * Conditional VM-exit.
9431 */
9432HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9433{
9434 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9435
9436#ifndef IN_NEM_DARWIN
9437 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9438
9439 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9440 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9441 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9442 | HMVMX_READ_IDT_VECTORING_INFO
9443 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9444 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9445
9446 /*
9447 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9448 */
9449 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9450 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9451 {
9452 /*
9453 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9454 * instruction emulation to inject the original event. Otherwise, injecting the original event
9455 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9456 */
9457 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9458 { /* likely */ }
9459 else
9460 {
9461 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9462# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9463 /** @todo NSTVMX: Think about how this should be handled. */
9464 if (pVmxTransient->fIsNestedGuest)
9465 return VERR_VMX_IPE_3;
9466# endif
9467 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9468 }
9469 }
9470 else
9471 {
9472 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9473 return rcStrict;
9474 }
9475
9476 /*
9477 * Get sufficient state and update the exit history entry.
9478 */
9479 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9480 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9481 AssertRCReturn(rc, rc);
9482
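    /* An EPT misconfiguration is typically how MMIO accesses surface with nested paging (the
       relevant pages are intentionally set up with a misconfigured EPT entry), which is why
       the exit is recorded with the MMIO exit type and routed to PGMR0Trap0eHandlerNPMisconfig
       below. */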
9483 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9484 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9485 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9486 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9487 if (!pExitRec)
9488 {
9489 /*
9490 * If we succeed, resume guest execution.
9491 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9492 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9493 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9494 * weird case. See @bugref{6043}.
9495 */
9496 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9497/** @todo bird: We can probably just go straight to IOM here and assume that
9498 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9499 * well. However, we need to address that aliasing workarounds that
9500 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9501 *
9502 * Might also be interesting to see if we can get this done more or
9503 * less locklessly inside IOM. Need to consider the lookup table
9504 * updating and use a bit more carefully first (or do all updates via
9505 * rendezvous) */
9506 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9507 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9508 if ( rcStrict == VINF_SUCCESS
9509 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9510 || rcStrict == VERR_PAGE_NOT_PRESENT)
9511 {
9512 /* Successfully handled MMIO operation. */
9513 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9514 | HM_CHANGED_GUEST_APIC_TPR);
9515 rcStrict = VINF_SUCCESS;
9516 }
9517 }
9518 else
9519 {
9520 /*
9521 * Frequent exit or something needing probing. Call EMHistoryExec.
9522 */
9523 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9524 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9525
9526 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9527 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9528
9529 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9530 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9531 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9532 }
9533 return rcStrict;
9534#else
9535 AssertFailed();
9536 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9537#endif
9538}
9539
9540
9541/**
9542 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9543 * VM-exit.
9544 */
9545HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9546{
9547 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9548#ifndef IN_NEM_DARWIN
9549 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9550
9551 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9552 | HMVMX_READ_EXIT_INSTR_LEN
9553 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9554 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9555 | HMVMX_READ_IDT_VECTORING_INFO
9556 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9557 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9558
9559 /*
9560 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9561 */
9562 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9563 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9564 {
9565 /*
9566 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9567 * we shall resolve the nested #PF and re-inject the original event.
9568 */
9569 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9570 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9571 }
9572 else
9573 {
9574 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9575 return rcStrict;
9576 }
9577
9578 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9579 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9580 AssertRCReturn(rc, rc);
9581
9582 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9583 uint64_t const uExitQual = pVmxTransient->uExitQual;
9584 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9585
9586 RTGCUINT uErrorCode = 0;
9587 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9588 uErrorCode |= X86_TRAP_PF_ID;
9589 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9590 uErrorCode |= X86_TRAP_PF_RW;
9591 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9592 uErrorCode |= X86_TRAP_PF_P;
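    /* The EPT violation qualification bits are translated above into #PF-style error-code bits
       (instruction fetch -> ID, write access -> RW, any read/write/execute permission present
       in the EPT entry -> P) so PGM's nested-paging handler can treat this like an ordinary
       page fault. */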
9593
9594 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9595 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9596
9597 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9598
9599 /*
9600 * Handle the pagefault trap for the nested shadow table.
9601 */
9602 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9603 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9604 TRPMResetTrap(pVCpu);
9605
9606 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9607 if ( rcStrict == VINF_SUCCESS
9608 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9609 || rcStrict == VERR_PAGE_NOT_PRESENT)
9610 {
9611 /* Successfully synced our nested page tables. */
9612 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9613 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9614 return VINF_SUCCESS;
9615 }
9616 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9617 return rcStrict;
9618
9619#else /* IN_NEM_DARWIN */
9620 PVM pVM = pVCpu->CTX_SUFF(pVM);
9621 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9622 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9623 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9624 vmxHCImportGuestRip(pVCpu);
9625 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9626
9627 /*
9628 * Ask PGM for information about the given GCPhys. We need to check if we're
9629 * out of sync first.
9630 */
9631 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9632 false,
9633 false };
9634 PGMPHYSNEMPAGEINFO Info;
9635 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9636 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9637 if (RT_SUCCESS(rc))
9638 {
9639 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9640 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9641 {
9642 if (State.fCanResume)
9643 {
9644 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9645 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9646 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9647 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9648 State.fDidSomething ? "" : " no-change"));
9649 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9650 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9651 return VINF_SUCCESS;
9652 }
9653 }
9654
9655 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9656 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9657 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9658 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9659 State.fDidSomething ? "" : " no-change"));
9660 }
9661 else
9662 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9663 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9664 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9665
9666 /*
9667 * Emulate the memory access, either access handler or special memory.
9668 */
9669 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9670 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9671 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9672 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9673 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9674
9675 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9676 AssertRCReturn(rc, rc);
9677
9678 VBOXSTRICTRC rcStrict;
9679 if (!pExitRec)
9680 rcStrict = IEMExecOne(pVCpu);
9681 else
9682 {
9683 /* Frequent access or probing. */
9684 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9685 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9686 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9687 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9688 }
9689
9690 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9691
9692 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9693 return rcStrict;
9694#endif /* IN_NEM_DARWIN */
9695}
9696
9697#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9698
9699/**
9700 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9701 */
9702HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9703{
9704 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9705
9706 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9707 | HMVMX_READ_EXIT_INSTR_INFO
9708 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9709 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9710 | CPUMCTX_EXTRN_SREG_MASK
9711 | CPUMCTX_EXTRN_HWVIRT
9712 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9713 AssertRCReturn(rc, rc);
9714
9715 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9716
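    /* Common pattern for the nested VMX instruction exits below: decode the guest memory
       operand from the instruction-information field and exit qualification, package it in a
       VMXVEXITINFO and let IEM emulate the instruction; VINF_IEM_RAISED_XCPT means an exception
       was queued instead and is converted back to VINF_SUCCESS. */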
9717 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9718 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9719
9720 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9721 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9722 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9723 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9724 {
9725 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9726 rcStrict = VINF_SUCCESS;
9727 }
9728 return rcStrict;
9729}
9730
9731
9732/**
9733 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9734 */
9735HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9736{
9737 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9738
9739 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9740 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9741 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9742 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9743 AssertRCReturn(rc, rc);
9744
9745 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9746
9747 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9748 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9749 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9750 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9751 {
9752 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9753 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9754 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9755 }
9756 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9757 return rcStrict;
9758}
9759
9760
9761/**
9762 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9763 */
9764HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9765{
9766 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9767
9768 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9769 | HMVMX_READ_EXIT_INSTR_INFO
9770 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9771 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9772 | CPUMCTX_EXTRN_SREG_MASK
9773 | CPUMCTX_EXTRN_HWVIRT
9774 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9775 AssertRCReturn(rc, rc);
9776
9777 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9778
9779 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9780 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9781
9782 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9783 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9784 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9785 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9786 {
9787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9788 rcStrict = VINF_SUCCESS;
9789 }
9790 return rcStrict;
9791}
9792
9793
9794/**
9795 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9796 */
9797HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9798{
9799 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9800
9801 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9802 | HMVMX_READ_EXIT_INSTR_INFO
9803 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9804 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9805 | CPUMCTX_EXTRN_SREG_MASK
9806 | CPUMCTX_EXTRN_HWVIRT
9807 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9808 AssertRCReturn(rc, rc);
9809
9810 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9811
9812 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9813 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9814
9815 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9816 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9818 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9819 {
9820 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9821 rcStrict = VINF_SUCCESS;
9822 }
9823 return rcStrict;
9824}
9825
9826
9827/**
9828 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9829 */
9830HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9831{
9832 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9833
9834 /*
9835 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9836 * thus might not need to import the shadow VMCS state, but it's safer to do so just
9837 * in case code elsewhere dares to look at unsynced VMCS fields.
9838 */
9839 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9840 | HMVMX_READ_EXIT_INSTR_INFO
9841 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9842 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9843 | CPUMCTX_EXTRN_SREG_MASK
9844 | CPUMCTX_EXTRN_HWVIRT
9845 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9846 AssertRCReturn(rc, rc);
9847
9848 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9849
9850 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9851 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9852 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9853
9854 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9855 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9856 {
9857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9858
9859# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9860 /* Try for exit optimization. This is on the following instruction
9861 because it would be a waste of time to have to reinterpret the
9862 already decoded vmread instruction. */
9863 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9864 if (pExitRec)
9865 {
9866 /* Frequent access or probing. */
9867 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9868 AssertRCReturn(rc, rc);
9869
9870 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9871 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9872 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9873 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9874 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9875 }
9876# endif
9877 }
9878 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9879 {
9880 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9881 rcStrict = VINF_SUCCESS;
9882 }
9883 return rcStrict;
9884}
9885
9886
9887/**
9888 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9889 */
9890HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9891{
9892 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9893
9894 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9895 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9896 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9897 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9898 AssertRCReturn(rc, rc);
9899
9900 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9901
9902 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9903 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9904 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9905 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9906 {
9907 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9908 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9909 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9910 }
9911 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9912 return rcStrict;
9913}
9914
9915
9916/**
9917 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9918 */
9919HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9920{
9921 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9922
9923 /*
9924 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
9925 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and
9926 * flags re-loading the entire shadow VMCS, so we should save the entire shadow VMCS here.
9927 */
9928 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9929 | HMVMX_READ_EXIT_INSTR_INFO
9930 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9931 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9932 | CPUMCTX_EXTRN_SREG_MASK
9933 | CPUMCTX_EXTRN_HWVIRT
9934 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9935 AssertRCReturn(rc, rc);
9936
9937 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9938
9939 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
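    /* Only the memory-operand form needs effective-address decoding; VMWRITE reads the value from its source, hence read access. */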
9940 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9941 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9942
9943 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9944 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9945 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9946 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9947 {
9948 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9949 rcStrict = VINF_SUCCESS;
9950 }
9951 return rcStrict;
9952}
9953
9954
9955/**
9956 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9957 */
9958HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9959{
9960 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9961
9962 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9963 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9964 | CPUMCTX_EXTRN_HWVIRT
9965 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9966 AssertRCReturn(rc, rc);
9967
9968 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9969
9970 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9971 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9973 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9974 {
9975 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9976 rcStrict = VINF_SUCCESS;
9977 }
9978 return rcStrict;
9979}
9980
9981
9982/**
9983 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9984 */
9985HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9986{
9987 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9988
9989 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9990 | HMVMX_READ_EXIT_INSTR_INFO
9991 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9992 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9993 | CPUMCTX_EXTRN_SREG_MASK
9994 | CPUMCTX_EXTRN_HWVIRT
9995 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9996 AssertRCReturn(rc, rc);
9997
9998 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9999
10000 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10001 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10002
10003 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
10004 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10005 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
10006 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10007 {
10008 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10009 rcStrict = VINF_SUCCESS;
10010 }
10011 return rcStrict;
10012}
10013
10014
10015/**
10016 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10017 */
10018HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10019{
10020 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10021
10022 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10023 | HMVMX_READ_EXIT_INSTR_INFO
10024 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10025 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10026 | CPUMCTX_EXTRN_SREG_MASK
10027 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10028 AssertRCReturn(rc, rc);
10029
10030 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10031
10032 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10033 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10034
10035 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10036 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10037 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10038 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10039 {
10040 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10041 rcStrict = VINF_SUCCESS;
10042 }
10043 return rcStrict;
10044}
10045
10046
10047# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10048/**
10049 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10050 */
10051HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10052{
10053 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10054
10055 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10056 | HMVMX_READ_EXIT_INSTR_INFO
10057 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10058 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10059 | CPUMCTX_EXTRN_SREG_MASK
10060 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10061 AssertRCReturn(rc, rc);
10062
10063 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10064
10065 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10066 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10067
10068 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10069 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10071 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10072 {
10073 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10074 rcStrict = VINF_SUCCESS;
10075 }
10076 return rcStrict;
10077}
10078# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10080/** @} */
10081
10082
10083#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10084/** @name Nested-guest VM-exit handlers.
10085 * @{
10086 */
10087/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10088/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10089/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10090
10091/**
10092 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10093 * Conditional VM-exit.
10094 */
10095HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10096{
10097 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10098
10099 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10100
10101 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10102 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10103 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10104
10105 switch (uExitIntType)
10106 {
10107# ifndef IN_NEM_DARWIN
10108 /*
10109 * Physical NMIs:
10110 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
10111 */
10112 case VMX_EXIT_INT_INFO_TYPE_NMI:
10113 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10114# endif
10115
10116 /*
10117 * Hardware exceptions,
10118 * Software exceptions,
10119 * Privileged software exceptions:
10120 * Figure out if the exception must be delivered to the guest or the nested-guest.
10121 */
10122 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10123 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10124 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10125 {
10126 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10127 | HMVMX_READ_EXIT_INSTR_LEN
10128 | HMVMX_READ_IDT_VECTORING_INFO
10129 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10130
10131 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
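            /* If the nested hypervisor intercepts this exception vector (for #PF subject to its error-code
               match controls), reflect the VM-exit to it; otherwise handle it as for a regular guest. */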
10132 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10133 {
10134 /* Exit qualification is required for debug and page-fault exceptions. */
10135 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10136
10137 /*
10138 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10139 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10140 * length. However, if delivery of a software interrupt, software exception or privileged
10141 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10142 */
10143 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10144 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10145 pVmxTransient->uExitIntErrorCode,
10146 pVmxTransient->uIdtVectoringInfo,
10147 pVmxTransient->uIdtVectoringErrorCode);
10148#ifdef DEBUG_ramshankar
10149 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10150 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10151 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10152 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10153 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10154 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10155#endif
10156 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10157 }
10158
10159 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10160 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10161 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10162 }
10163
10164 /*
10165 * Software interrupts:
10166 * VM-exits cannot be caused by software interrupts.
10167 *
10168 * External interrupts:
10169 * This should only happen when "acknowledge external interrupts on VM-exit"
10170 * control is set. However, we never set this when executing a guest or
10171 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10172 * the guest.
10173 */
10174 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10175 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10176 default:
10177 {
10178 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10179 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10180 }
10181 }
10182}
10183
10184
10185/**
10186 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10187 * Unconditional VM-exit.
10188 */
10189HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10190{
10191 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10192 return IEMExecVmxVmexitTripleFault(pVCpu);
10193}
10194
10195
10196/**
10197 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10198 */
10199HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10200{
10201 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10202
10203 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10204 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10205 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10206}
10207
10208
10209/**
10210 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10211 */
10212HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10213{
10214 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10215
10216 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10217 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10218 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10219}
10220
10221
10222/**
10223 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10224 * Unconditional VM-exit.
10225 */
10226HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10227{
10228 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10229
10230 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10231 | HMVMX_READ_EXIT_INSTR_LEN
10232 | HMVMX_READ_IDT_VECTORING_INFO
10233 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10234
10235 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10236 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10237 pVmxTransient->uIdtVectoringErrorCode);
10238 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10239}
10240
10241
10242/**
10243 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10244 */
10245HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10246{
10247 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10248
10249 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10250 {
10251 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10252 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10253 }
10254 return vmxHCExitHlt(pVCpu, pVmxTransient);
10255}
10256
10257
10258/**
10259 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10260 */
10261HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10262{
10263 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10264
10265 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10266 {
10267 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10268 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10269 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10270 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10271 }
10272 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10273}
10274
10275
10276/**
10277 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10278 */
10279HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10280{
10281 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10282
10283 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10284 {
10285 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10286 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10287 }
10288 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10289}
10290
10291
10292/**
10293 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10294 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10295 */
10296HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10297{
10298 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10299
10300 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10301 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10302
10303 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10304
10305 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10306 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10307 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10308
10309 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
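    /* Outside long mode only the lower 32 bits of the register supply the VMCS field encoding. */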
10310 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10311 u64VmcsField &= UINT64_C(0xffffffff);
10312
10313 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10314 {
10315 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10316 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10317 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10318 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10319 }
10320
10321 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10322 return vmxHCExitVmread(pVCpu, pVmxTransient);
10323 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10324}
10325
10326
10327/**
10328 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10329 */
10330HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10331{
10332 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10333
10334 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10335 {
10336 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10337 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10338 }
10339
10340 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10341}
10342
10343
10344/**
10345 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10346 * Conditional VM-exit.
10347 */
10348HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10349{
10350 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10351
10352 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10353 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10354
10355 VBOXSTRICTRC rcStrict;
10356 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10357 switch (uAccessType)
10358 {
10359 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10360 {
10361 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10362 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10363 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10364 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10365
10366 bool fIntercept;
10367 switch (iCrReg)
10368 {
10369 case 0:
10370 case 4:
10371 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10372 break;
10373
10374 case 3:
10375 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10376 break;
10377
10378 case 8:
10379 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10380 break;
10381
10382 default:
10383 fIntercept = false;
10384 break;
10385 }
10386 if (fIntercept)
10387 {
10388 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10389 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10390 }
10391 else
10392 {
10393 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10394 AssertRCReturn(rc, rc);
10395 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10396 }
10397 break;
10398 }
10399
10400 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10401 {
10402 /*
10403 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10404 * CR2 reads do not cause a VM-exit.
10405 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10406 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10407 */
10408 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10409 if ( iCrReg == 3
10410 || iCrReg == 8)
10411 {
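                /* Maps the CR number to the processor-based control that intercepts reads of it. */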
10412 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10413 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10414 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10415 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10416 {
10417 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10418 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10419 }
10420 else
10421 {
10422 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10423 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10424 }
10425 }
10426 else
10427 {
10428 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10429 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10430 }
10431 break;
10432 }
10433
10434 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10435 {
10436 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10437 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10438 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
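            /* CLTS causes a VM-exit only if CR0.TS is owned by the nested hypervisor (set in the CR0
               guest/host mask) and the TS bit in its CR0 read shadow is set. */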
10439 if ( (uGstHostMask & X86_CR0_TS)
10440 && (uReadShadow & X86_CR0_TS))
10441 {
10442 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10443 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10444 }
10445 else
10446 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10447 break;
10448 }
10449
10450 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10451 {
10452 RTGCPTR GCPtrEffDst;
10453 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10454 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10455 if (fMemOperand)
10456 {
10457 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10458 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10459 }
10460 else
10461 GCPtrEffDst = NIL_RTGCPTR;
10462
10463 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10464 {
10465 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10466 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10467 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10468 }
10469 else
10470 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10471 break;
10472 }
10473
10474 default:
10475 {
10476 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10477 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10478 }
10479 }
10480
10481 if (rcStrict == VINF_IEM_RAISED_XCPT)
10482 {
10483 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10484 rcStrict = VINF_SUCCESS;
10485 }
10486 return rcStrict;
10487}
10488
10489
10490/**
10491 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10492 * Conditional VM-exit.
10493 */
10494HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10495{
10496 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10497
10498 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10499 {
10500 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10501 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10502 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10503 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10504 }
10505 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10506}
10507
10508
10509/**
10510 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10511 * Conditional VM-exit.
10512 */
10513HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10514{
10515 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10516
10517 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10518
10519 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10520 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
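    /* Exit qualification size-of-access encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is not a valid encoding. */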
10521 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10522
10523 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10524 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10525 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10526 {
10527 /*
10528 * IN/OUT instruction:
10529 * - Provides VM-exit instruction length.
10530 *
10531 * INS/OUTS instruction:
10532 * - Provides VM-exit instruction length.
10533 * - Provides Guest-linear address.
10534 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10535 */
10536 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10537 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10538
10539 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10540 pVmxTransient->ExitInstrInfo.u = 0;
10541 pVmxTransient->uGuestLinearAddr = 0;
10542
10543 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10544 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10545 if (fIOString)
10546 {
10547 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10548 if (fVmxInsOutsInfo)
10549 {
10550 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10551 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10552 }
10553 }
10554
10555 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10556 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10557 }
10558 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10559}
10560
10561
10562/**
10563 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10564 */
10565HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10566{
10567 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10568
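    /* If the nested hypervisor uses MSR bitmaps, consult its bitmap for the MSR in ECX; otherwise every RDMSR exits to it. */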
10569 uint32_t fMsrpm;
10570 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10571 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10572 else
10573 fMsrpm = VMXMSRPM_EXIT_RD;
10574
10575 if (fMsrpm & VMXMSRPM_EXIT_RD)
10576 {
10577 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10578 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10579 }
10580 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10581}
10582
10583
10584/**
10585 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10586 */
10587HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10588{
10589 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10590
10591 uint32_t fMsrpm;
10592 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10593 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10594 else
10595 fMsrpm = VMXMSRPM_EXIT_WR;
10596
10597 if (fMsrpm & VMXMSRPM_EXIT_WR)
10598 {
10599 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10600 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10601 }
10602 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10603}
10604
10605
10606/**
10607 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10608 */
10609HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10610{
10611 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10612
10613 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10614 {
10615 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10616 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10617 }
10618 return vmxHCExitMwait(pVCpu, pVmxTransient);
10619}
10620
10621
10622/**
10623 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10624 * VM-exit.
10625 */
10626HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10627{
10628 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10629
10630 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10631 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10632 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10633 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10634}
10635
10636
10637/**
10638 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10639 */
10640HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10641{
10642 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10643
10644 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10645 {
10646 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10647 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10648 }
10649 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10650}
10651
10652
10653/**
10654 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10655 */
10656HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10657{
10658 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10659
10660 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10661 * PAUSE when executing a nested-guest? If it does not, we would not need
10662 * to check for the intercepts here. Just call VM-exit... */
10663
10664 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10665 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10666 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10667 {
10668 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10669 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10670 }
10671 return vmxHCExitPause(pVCpu, pVmxTransient);
10672}
10673
10674
10675/**
10676 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10677 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10678 */
10679HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10680{
10681 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10682
10683 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10684 {
10685 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10686 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10687 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10688 }
10689 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10690}
10691
10692
10693/**
10694 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10695 * VM-exit.
10696 */
10697HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10698{
10699 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10700
10701 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10702 | HMVMX_READ_EXIT_INSTR_LEN
10703 | HMVMX_READ_IDT_VECTORING_INFO
10704 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10705
10706 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10707
10708 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10709 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10710
10711 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10712 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10713 pVmxTransient->uIdtVectoringErrorCode);
10714 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10715}
10716
10717
10718/**
10719 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10720 * Conditional VM-exit.
10721 */
10722HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10723{
10724 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10725
10726 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10727 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10728 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10729}
10730
10731
10732/**
10733 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10734 * Conditional VM-exit.
10735 */
10736HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10737{
10738 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10739
10740 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10741 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10742 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10743}
10744
10745
10746/**
10747 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10748 */
10749HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10750{
10751 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10752
10753 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10754 {
10755 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10756 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10757 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10758 }
10759 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10760}
10761
10762
10763/**
10764 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10765 */
10766HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10767{
10768 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10769
10770 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10771 {
10772 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10773 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10774 }
10775 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10776}
10777
10778
10779/**
10780 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10781 */
10782HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10783{
10784 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10785
10786 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10787 {
10788 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10789 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10790 | HMVMX_READ_EXIT_INSTR_INFO
10791 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10792 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10793 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10794 }
10795 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10796}
10797
10798
10799/**
10800 * Nested-guest VM-exit handler for invalid-guest state
10801 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10802 */
10803HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10804{
10805 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10806
10807 /*
10808 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10809 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10810 * Handle it as if the outer guest were in an invalid guest state.
10811 *
10812 * When the fast path is implemented, this should be changed to cause the corresponding
10813 * nested-guest VM-exit.
10814 */
10815 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10816}
10817
10818
10819/**
10820 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10821 * and only provide the instruction length.
10822 *
10823 * Unconditional VM-exit.
10824 */
10825HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10826{
10827 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10828
10829#ifdef VBOX_STRICT
10830 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10831 switch (pVmxTransient->uExitReason)
10832 {
10833 case VMX_EXIT_ENCLS:
10834 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10835 break;
10836
10837 case VMX_EXIT_VMFUNC:
10838 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10839 break;
10840 }
10841#endif
10842
10843 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10844 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10845}
10846
10847
10848/**
10849 * Nested-guest VM-exit handler for instructions that provide instruction length as
10850 * well as more information.
10851 *
10852 * Unconditional VM-exit.
10853 */
10854HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10855{
10856 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10857
10858# ifdef VBOX_STRICT
10859 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10860 switch (pVmxTransient->uExitReason)
10861 {
10862 case VMX_EXIT_GDTR_IDTR_ACCESS:
10863 case VMX_EXIT_LDTR_TR_ACCESS:
10864 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10865 break;
10866
10867 case VMX_EXIT_RDRAND:
10868 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10869 break;
10870
10871 case VMX_EXIT_RDSEED:
10872 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10873 break;
10874
10875 case VMX_EXIT_XSAVES:
10876 case VMX_EXIT_XRSTORS:
10877 /** @todo NSTVMX: Verify XSS-bitmap. */
10878 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10879 break;
10880
10881 case VMX_EXIT_UMWAIT:
10882 case VMX_EXIT_TPAUSE:
10883 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10884 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10885 break;
10886
10887 case VMX_EXIT_LOADIWKEY:
10888 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10889 break;
10890 }
10891# endif
10892
10893 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10894 | HMVMX_READ_EXIT_INSTR_LEN
10895 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10896 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10897 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10898}
10899
10900# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10901
10902/**
10903 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10904 * Conditional VM-exit.
10905 */
10906HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10907{
10908 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10909 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10910
10911 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10912 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10913 {
10914 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10915 | HMVMX_READ_EXIT_INSTR_LEN
10916 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10917 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10918 | HMVMX_READ_IDT_VECTORING_INFO
10919 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10920 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10921 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10922 AssertRCReturn(rc, rc);
10923
10924 /*
10925 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10926 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10927 * it's the inner VMM's problem to deal with and we'll clear the recovered event.
10928 */
10929 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10930 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10931 { /*likely*/ }
10932 else
10933 {
10934 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10935 return rcStrict;
10936 }
10937 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10938
10939 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10940 uint64_t const uExitQual = pVmxTransient->uExitQual;
10941
10942 RTGCPTR GCPtrNestedFault;
10943 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10944 if (fIsLinearAddrValid)
10945 {
10946 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10947 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10948 }
10949 else
10950 GCPtrNestedFault = 0;
10951
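        /* Synthesize a page-fault style error code from the exit qualification: instruction fetch, write access,
           and whether the access hit a present (readable/writable/executable) translation. */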
10952 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10953 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10954 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10955 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10956 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10957
10958 PGMPTWALK Walk;
10959 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10960 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10961 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10962 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10963 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10964 if (RT_SUCCESS(rcStrict))
10965 {
10966 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
10967 {
10968 Assert(!fClearEventOnForward);
10969 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
10970 rcStrict = VINF_EM_RESCHEDULE_REM;
10971 }
10972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10973 return rcStrict;
10974 }
10975
10976 if (fClearEventOnForward)
10977 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10978
10979 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10980 pVmxTransient->uIdtVectoringErrorCode);
10981 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10982 {
10983 VMXVEXITINFO const ExitInfo
10984 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10985 pVmxTransient->uExitQual,
10986 pVmxTransient->cbExitInstr,
10987 pVmxTransient->uGuestLinearAddr,
10988 pVmxTransient->uGuestPhysicalAddr);
10989 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10990 }
10991
10992 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10993 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10994 }
10995
10996 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10997}
10998
10999
11000/**
11001 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11002 * Conditional VM-exit.
11003 */
11004HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11005{
11006 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11007 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
11008
11009 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11010 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
11011 {
11012 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
11013 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11014 AssertRCReturn(rc, rc);
11015
11016 PGMPTWALK Walk;
11017 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11018 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11019 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11020 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11021 0 /* GCPtrNestedFault */, &Walk);
11022 if (RT_SUCCESS(rcStrict))
11023 {
11024 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11025 return rcStrict;
11026 }
11027
11028 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11029 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11030 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11031
11032 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11033 pVmxTransient->uIdtVectoringErrorCode);
11034 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11035 }
11036
11037 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11038}
11039
11040# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11041
11042/** @} */
11043#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11044
11045
11046/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11047 * probes.
11048 *
11049 * The following few functions and associated structure contain the bloat
11050 * necessary for providing detailed debug events and dtrace probes as well as
11051 * reliable host side single stepping. This works on the principle of
11052 * "subclassing" the normal execution loop and workers. We replace the loop
11053 * method completely and override selected helpers to add necessary adjustments
11054 * to their core operation.
11055 *
11056 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11057 * any performance for debug and analysis features.
11058 *
11059 * @{
11060 */
11061
11062/**
11063 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
11064 * in the debug run loop.
11065 */
11066typedef struct VMXRUNDBGSTATE
11067{
11068 /** The RIP we started executing at. This is for detecting that we stepped. */
11069 uint64_t uRipStart;
11070 /** The CS we started executing with. */
11071 uint16_t uCsStart;
11072
11073 /** Whether we've actually modified the 1st execution control field. */
11074 bool fModifiedProcCtls : 1;
11075 /** Whether we've actually modified the 2nd execution control field. */
11076 bool fModifiedProcCtls2 : 1;
11077 /** Whether we've actually modified the exception bitmap. */
11078 bool fModifiedXcptBitmap : 1;
11079
11080 /** Whether we desire the CR0 mask to be cleared. */
11081 bool fClearCr0Mask : 1;
11082 /** Whether we desire the CR4 mask to be cleared. */
11083 bool fClearCr4Mask : 1;
11084 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11085 uint32_t fCpe1Extra;
11086 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11087 uint32_t fCpe1Unwanted;
11088 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11089 uint32_t fCpe2Extra;
11090 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11091 uint32_t bmXcptExtra;
11092 /** The sequence number of the Dtrace provider settings the state was
11093 * configured against. */
11094 uint32_t uDtraceSettingsSeqNo;
11095 /** VM-exits to check (one bit per VM-exit). */
11096 uint32_t bmExitsToCheck[3];
11097
11098 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11099 uint32_t fProcCtlsInitial;
11100 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11101 uint32_t fProcCtls2Initial;
11102 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11103 uint32_t bmXcptInitial;
11104} VMXRUNDBGSTATE;
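/* The VM-exit bitmap must provide one bit per exit reason, rounded up to whole 32-bit words. */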
11105AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11106typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11107
11108
11109/**
11110 * Initializes the VMXRUNDBGSTATE structure.
11111 *
11112 * @param pVCpu The cross context virtual CPU structure of the
11113 * calling EMT.
11114 * @param pVmxTransient The VMX-transient structure.
11115 * @param pDbgState The debug state to initialize.
11116 */
11117static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11118{
11119 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11120 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11121
11122 pDbgState->fModifiedProcCtls = false;
11123 pDbgState->fModifiedProcCtls2 = false;
11124 pDbgState->fModifiedXcptBitmap = false;
11125 pDbgState->fClearCr0Mask = false;
11126 pDbgState->fClearCr4Mask = false;
11127 pDbgState->fCpe1Extra = 0;
11128 pDbgState->fCpe1Unwanted = 0;
11129 pDbgState->fCpe2Extra = 0;
11130 pDbgState->bmXcptExtra = 0;
11131 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11132 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11133 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11134}
11135
11136
11137/**
11138 * Updates the VMCS fields with changes requested by @a pDbgState.
11139 *
11140 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
11141 * immediately before executing guest code, i.e. when interrupts are disabled.
11142 * We don't check status codes here as we cannot easily assert or return in the
11143 * latter case.
11144 *
11145 * @param pVCpu The cross context virtual CPU structure.
11146 * @param pVmxTransient The VMX-transient structure.
11147 * @param pDbgState The debug state.
11148 */
11149static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11150{
11151 /*
11152 * Ensure desired flags in VMCS control fields are set.
11153 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11154 *
11155 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11156 * there should be no stale data in pCtx at this point.
11157 */
11158 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11159 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11160 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11161 {
11162 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11163 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11164 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11165 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11166 pDbgState->fModifiedProcCtls = true;
11167 }
11168
11169 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11170 {
11171 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11172 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11173 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11174 pDbgState->fModifiedProcCtls2 = true;
11175 }
11176
11177 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11178 {
11179 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11180 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11181 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11182 pDbgState->fModifiedXcptBitmap = true;
11183 }
11184
11185 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11186 {
11187 pVmcsInfo->u64Cr0Mask = 0;
11188 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11189 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11190 }
11191
11192 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11193 {
11194 pVmcsInfo->u64Cr4Mask = 0;
11195 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11196 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11197 }
11198
11199 NOREF(pVCpu);
11200}
11201
11202
11203/**
11204 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
11205 * re-entry next time around.
11206 *
11207 * @returns Strict VBox status code (i.e. informational status codes too).
11208 * @param pVCpu The cross context virtual CPU structure.
11209 * @param pVmxTransient The VMX-transient structure.
11210 * @param pDbgState The debug state.
11211 * @param rcStrict The return code from executing the guest using single
11212 * stepping.
11213 */
11214static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11215 VBOXSTRICTRC rcStrict)
11216{
11217 /*
11218 * Restore VM-exit control settings as we may not reenter this function the
11219 * next time around.
11220 */
11221 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11222
11223 /* We reload the initial value and trigger what recalculations we can the
11224 next time around. From the looks of things, that's all that's required atm. */
11225 if (pDbgState->fModifiedProcCtls)
11226 {
11227 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11228 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11229 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11230 AssertRC(rc2);
11231 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11232 }
11233
11234 /* We're currently the only ones messing with this one, so just restore the
11235 cached value and reload the field. */
11236 if ( pDbgState->fModifiedProcCtls2
11237 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11238 {
11239 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11240 AssertRC(rc2);
11241 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11242 }
11243
11244 /* If we've modified the exception bitmap, we restore it and trigger
11245 reloading and partial recalculation the next time around. */
11246 if (pDbgState->fModifiedXcptBitmap)
11247 {
11248 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11249 AssertRC(rc2);
11250 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11251 }
11252
11253 return rcStrict;
11254}
11255
11256
11257/**
11258 * Configures VM-exit controls for current DBGF and DTrace settings.
11259 *
11260 * This updates @a pDbgState and the VMCS execution control fields to reflect
11261 * the necessary VM-exits demanded by DBGF and DTrace.
11262 *
11263 * @param pVCpu The cross context virtual CPU structure.
11264 * @param pVmxTransient The VMX-transient structure. May update
11265 * fUpdatedTscOffsettingAndPreemptTimer.
11266 * @param pDbgState The debug state.
11267 */
11268static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11269{
11270#ifndef IN_NEM_DARWIN
11271 /*
11272 * Take down the dtrace serial number so we can spot changes.
11273 */
11274 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11275 ASMCompilerBarrier();
11276#endif
11277
11278 /*
11279 * We'll rebuild most of the middle block of data members (holding the
11280 * current settings) as we go along here, so start by clearing it all.
11281 */
11282 pDbgState->bmXcptExtra = 0;
11283 pDbgState->fCpe1Extra = 0;
11284 pDbgState->fCpe1Unwanted = 0;
11285 pDbgState->fCpe2Extra = 0;
11286 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11287 pDbgState->bmExitsToCheck[i] = 0;
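    /* Each bit in bmExitsToCheck corresponds to a VMX_EXIT_XXX reason; exits whose
       bit ends up set are routed through vmxHCHandleExitDtraceEvents() by
       vmxHCRunDebugHandleExit() before the normal exit handling gets to run. */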
11288
11289 /*
11290 * Software interrupts (INT XXh) - no idea how to trigger these...
11291 */
11292 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11293 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11294 || VBOXVMM_INT_SOFTWARE_ENABLED())
11295 {
11296 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11297 }
11298
11299 /*
11300 * INT3 breakpoints - triggered by #BP exceptions.
11301 */
11302 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11303 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11304
11305 /*
11306 * Exception bitmap and XCPT events+probes.
11307 */
11308 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11309 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11310 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11311
11312 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11313 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11314 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11315 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11316 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11317 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11318 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11319 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11320 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11321 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11322 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11323 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11324 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11325 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11326 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11327 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11328 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11329 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11330
11331 if (pDbgState->bmXcptExtra)
11332 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11333
11334 /*
11335 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11336 *
11337      * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
11338 * So, when adding/changing/removing please don't forget to update it.
11339 *
11340      * Some of the macros pick up local variables to save horizontal space
11341      * (being able to see it in a table is the lesser evil here).
11342 */
11343#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11344 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11345 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11346#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11347 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11348 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11349 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11350 } else do { } while (0)
11351#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11352 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11353 { \
11354 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11355 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11356 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11357 } else do { } while (0)
11358#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11359 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11360 { \
11361 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11362 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11363 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11364 } else do { } while (0)
11365#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11366 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11367 { \
11368 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11369 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11370 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11371 } else do { } while (0)
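    /* As an illustration (the AssertCompile range check omitted), the invocation
       SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
       below expands to roughly:

           if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
               || VBOXVMM_INSTR_HALT_ENABLED())
           {
               pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
               ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
           }

       i.e. HLT exiting is forced on and the HLT exit is marked for event/probe
       dispatching. */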
11372
11373 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11374 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11375 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11376 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11377 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11378
11379 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11380 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11381 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11382 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11383 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11384 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11385 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11386 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11387 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11388 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11389 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11390 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11391 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11392 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11393 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11394 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11395 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11396 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11397 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11398 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11399 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11400 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11401 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11402 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11403 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11404 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11405 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11406 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11407 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11408 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11409 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11410 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11411 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11412 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11413 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11414 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11415
11416 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11417 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11418 {
11419 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11420 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11421 AssertRC(rc);
11422
11423#if 0 /** @todo fix me */
11424 pDbgState->fClearCr0Mask = true;
11425 pDbgState->fClearCr4Mask = true;
11426#endif
11427 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11428 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11429 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11430 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11431 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11432 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11433 require clearing here and in the loop if we start using it. */
11434 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11435 }
11436 else
11437 {
11438 if (pDbgState->fClearCr0Mask)
11439 {
11440 pDbgState->fClearCr0Mask = false;
11441 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11442 }
11443 if (pDbgState->fClearCr4Mask)
11444 {
11445 pDbgState->fClearCr4Mask = false;
11446 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11447 }
11448 }
11449 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11450 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11451
11452 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11453 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11454 {
11455 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11456 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11457 }
11458 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11459 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11460
11461 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11462 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11463 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11464 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11465 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11466 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11467 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11468 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11469#if 0 /** @todo too slow, fix handler. */
11470 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11471#endif
11472 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11473
11474 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11475 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11476 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11477 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11478 {
11479 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11480 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11481 }
11482 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11483 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11484 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11485 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11486
11487 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11488 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11489 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11490 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11491 {
11492 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11493 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11494 }
11495 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11496 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11497 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11498 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11499
11500 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11501 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11502 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11503 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11504 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11505 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11506 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11507 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11508 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11509 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11510 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11511 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11512 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11513 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11514 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11515 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11516 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11517 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11518 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11519     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11520 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11521 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11522
11523#undef IS_EITHER_ENABLED
11524#undef SET_ONLY_XBM_IF_EITHER_EN
11525#undef SET_CPE1_XBM_IF_EITHER_EN
11526#undef SET_CPEU_XBM_IF_EITHER_EN
11527#undef SET_CPE2_XBM_IF_EITHER_EN
11528
11529 /*
11530 * Sanitize the control stuff.
11531 */
11532 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11533 if (pDbgState->fCpe2Extra)
11534 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11535 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11536 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
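    /* The masking above keeps fCpe1Extra/fCpe2Extra limited to controls the CPU can
       actually set (allowed-1 settings) and drops from fCpe1Unwanted any control the
       allowed-0 settings force to remain set. */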
11537#ifndef IN_NEM_DARWIN
11538 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11539 {
11540 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11541 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11542 }
11543#else
11544 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11545 {
11546 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11547 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11548 }
11549#endif
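    /* If the desired RDTSC-exiting state changed, clearing
       fUpdatedTscOffsettingAndPreemptTimer above marks the TSC offsetting and
       preemption timer setup as stale so it gets redone before the next VM-entry. */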
11550
11551 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11552 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11553 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11554 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11555}
11556
11557
11558/**
11559 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11560 * appropriate.
11561 *
11562 * The caller has checked the VM-exit against the
11563 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11564 * already, so we don't have to do that either.
11565 *
11566 * @returns Strict VBox status code (i.e. informational status codes too).
11567 * @param pVCpu The cross context virtual CPU structure.
11568 * @param pVmxTransient The VMX-transient structure.
11569 * @param uExitReason The VM-exit reason.
11570 *
11571 * @remarks The name of this function is displayed by dtrace, so keep it short
11572      *          and to the point. No longer than 33 chars, please.
11573 */
11574static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11575{
11576 /*
11577 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11578 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11579 *
11580      * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11581      *       does. Must add/change/remove in both places. Same ordering, please.
11582 *
11583 * Added/removed events must also be reflected in the next section
11584 * where we dispatch dtrace events.
11585 */
11586 bool fDtrace1 = false;
11587 bool fDtrace2 = false;
11588 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11589 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11590 uint32_t uEventArg = 0;
11591#define SET_EXIT(a_EventSubName) \
11592 do { \
11593 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11594 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11595 } while (0)
11596#define SET_BOTH(a_EventSubName) \
11597 do { \
11598 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11599 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11600 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11601 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11602 } while (0)
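    /* For example, SET_BOTH(CPUID) below is shorthand for:
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       whereas SET_EXIT(TASK_SWITCH) only sets the enmEvent2/fDtrace2 pair. */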
11603 switch (uExitReason)
11604 {
11605 case VMX_EXIT_MTF:
11606 return vmxHCExitMtf(pVCpu, pVmxTransient);
11607
11608 case VMX_EXIT_XCPT_OR_NMI:
11609 {
11610 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11611 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11612 {
11613 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11614 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11615 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11616 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11617 {
11618 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11619 {
11620 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11621 uEventArg = pVmxTransient->uExitIntErrorCode;
11622 }
11623 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11624 switch (enmEvent1)
11625 {
11626 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11627 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11628 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11629 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11630 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11631 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11632 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11633 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11634 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11635 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11636 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11637 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11638 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11639 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11640 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11641 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11642 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11643 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11644 default: break;
11645 }
11646 }
11647 else
11648 AssertFailed();
11649 break;
11650
11651 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11652 uEventArg = idxVector;
11653 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11654 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11655 break;
11656 }
11657 break;
11658 }
11659
11660 case VMX_EXIT_TRIPLE_FAULT:
11661 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11662 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11663 break;
11664 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11665 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11666 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11667 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11668 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11669
11670 /* Instruction specific VM-exits: */
11671 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11672 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11673 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11674 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11675 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11676 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11677 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11678 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11679 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11680 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11681 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11682 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11683 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11684 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11685 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11686 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11687 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11688 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11689 case VMX_EXIT_MOV_CRX:
11690 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11691 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11692 SET_BOTH(CRX_READ);
11693 else
11694 SET_BOTH(CRX_WRITE);
11695 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11696 break;
11697 case VMX_EXIT_MOV_DRX:
11698 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11699 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11700 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11701 SET_BOTH(DRX_READ);
11702 else
11703 SET_BOTH(DRX_WRITE);
11704 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11705 break;
11706 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11707 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11708 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11709 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11710 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11711 case VMX_EXIT_GDTR_IDTR_ACCESS:
11712 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11713 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11714 {
11715 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11716 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11717 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11718 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11719 }
11720 break;
11721
11722 case VMX_EXIT_LDTR_TR_ACCESS:
11723 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11724 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11725 {
11726 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11727 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11728 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11729 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11730 }
11731 break;
11732
11733 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11734 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11735 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11736 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11737 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11738 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11739 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11740 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11741 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11742 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11743 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11744
11745 /* Events that aren't relevant at this point. */
11746 case VMX_EXIT_EXT_INT:
11747 case VMX_EXIT_INT_WINDOW:
11748 case VMX_EXIT_NMI_WINDOW:
11749 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11750 case VMX_EXIT_PREEMPT_TIMER:
11751 case VMX_EXIT_IO_INSTR:
11752 break;
11753
11754 /* Errors and unexpected events. */
11755 case VMX_EXIT_INIT_SIGNAL:
11756 case VMX_EXIT_SIPI:
11757 case VMX_EXIT_IO_SMI:
11758 case VMX_EXIT_SMI:
11759 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11760 case VMX_EXIT_ERR_MSR_LOAD:
11761 case VMX_EXIT_ERR_MACHINE_CHECK:
11762 case VMX_EXIT_PML_FULL:
11763 case VMX_EXIT_VIRTUALIZED_EOI:
11764 break;
11765
11766 default:
11767 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11768 break;
11769 }
11770#undef SET_BOTH
11771#undef SET_EXIT
11772
11773 /*
11774      * Dtrace tracepoints go first. We do them all here in one place so we don't
11775      * have to duplicate the guest-state import and related code a few dozen times.
11776      * The downside is that we've got to repeat the switch, though this time
11777      * we use enmEvent since the probes are a subset of what DBGF does.
11778 */
11779 if (fDtrace1 || fDtrace2)
11780 {
11781 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11782 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11783 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11784 switch (enmEvent1)
11785 {
11786 /** @todo consider which extra parameters would be helpful for each probe. */
11787 case DBGFEVENT_END: break;
11788 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11789 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11790 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11791 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11792 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11793 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11794 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11795 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11796 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11797 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11798 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11799 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11800 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11801 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11802 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11803 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11804 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11805 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11806 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11807 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11808 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11809 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11810 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11811 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11812 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11813 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11814 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11815 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11816 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11817 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11818 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11819 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11820 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11821 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11822 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11823 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11824 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11825 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11826 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11827 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11828 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11829 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11830 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11831 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11832 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11833 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11834 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11835 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11836 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11837 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11838 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11839 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11840 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11841 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11842 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11843 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11844 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11845 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11846 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11847 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11848 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11849 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11850 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11851 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11852 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11853 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11854 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11855 }
11856 switch (enmEvent2)
11857 {
11858 /** @todo consider which extra parameters would be helpful for each probe. */
11859 case DBGFEVENT_END: break;
11860 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11861 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11862 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11863 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11864 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11865 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11866 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11867 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11868 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11869 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11870 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11871 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11872 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11873 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11874 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11875 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11876 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11877 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11878 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11879 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11880 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11881 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11882 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11883 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11884 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11885 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11886 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11887 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11888 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11889 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11890 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11891 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11892 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11893 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11894 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11895 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11896 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11897 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11898 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11899 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11900 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11901 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11902 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11903 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11904 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11905 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11906 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11907 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11908 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11909 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11910 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11911 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11912 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11913 }
11914 }
11915
11916 /*
11917      * Fire off the DBGF event, if enabled (our check here is just a quick one,
11918 * the DBGF call will do a full check).
11919 *
11920 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11921      * Note! If we have two events, we prioritize the first, i.e. the instruction
11922 * one, in order to avoid event nesting.
11923 */
11924 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11925 if ( enmEvent1 != DBGFEVENT_END
11926 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11927 {
11928 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11929 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11930 if (rcStrict != VINF_SUCCESS)
11931 return rcStrict;
11932 }
11933 else if ( enmEvent2 != DBGFEVENT_END
11934 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11935 {
11936 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11937 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11938 if (rcStrict != VINF_SUCCESS)
11939 return rcStrict;
11940 }
11941
11942 return VINF_SUCCESS;
11943}
11944
11945
11946/**
11947 * Single-stepping VM-exit filtering.
11948 *
11949 * This is preprocessing the VM-exits and deciding whether we've gotten far
11950 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11951 * handling is performed.
11952 *
11953 * @returns Strict VBox status code (i.e. informational status codes too).
11954 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11955 * @param pVmxTransient The VMX-transient structure.
11956 * @param pDbgState The debug state.
11957 */
11958DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11959{
11960 /*
11961 * Expensive (saves context) generic dtrace VM-exit probe.
11962 */
11963 uint32_t const uExitReason = pVmxTransient->uExitReason;
11964 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11965 { /* more likely */ }
11966 else
11967 {
11968 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11969 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11970 AssertRC(rc);
11971 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11972 }
11973
11974#ifndef IN_NEM_DARWIN
11975 /*
11976 * Check for host NMI, just to get that out of the way.
11977 */
11978 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11979 { /* normally likely */ }
11980 else
11981 {
11982 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11983 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11984 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11985 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11986 }
11987#endif
11988
11989 /*
11990 * Check for single stepping event if we're stepping.
11991 */
11992 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11993 {
11994 switch (uExitReason)
11995 {
11996 case VMX_EXIT_MTF:
11997 return vmxHCExitMtf(pVCpu, pVmxTransient);
11998
11999 /* Various events: */
12000 case VMX_EXIT_XCPT_OR_NMI:
12001 case VMX_EXIT_EXT_INT:
12002 case VMX_EXIT_TRIPLE_FAULT:
12003 case VMX_EXIT_INT_WINDOW:
12004 case VMX_EXIT_NMI_WINDOW:
12005 case VMX_EXIT_TASK_SWITCH:
12006 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12007 case VMX_EXIT_APIC_ACCESS:
12008 case VMX_EXIT_EPT_VIOLATION:
12009 case VMX_EXIT_EPT_MISCONFIG:
12010 case VMX_EXIT_PREEMPT_TIMER:
12011
12012 /* Instruction specific VM-exits: */
12013 case VMX_EXIT_CPUID:
12014 case VMX_EXIT_GETSEC:
12015 case VMX_EXIT_HLT:
12016 case VMX_EXIT_INVD:
12017 case VMX_EXIT_INVLPG:
12018 case VMX_EXIT_RDPMC:
12019 case VMX_EXIT_RDTSC:
12020 case VMX_EXIT_RSM:
12021 case VMX_EXIT_VMCALL:
12022 case VMX_EXIT_VMCLEAR:
12023 case VMX_EXIT_VMLAUNCH:
12024 case VMX_EXIT_VMPTRLD:
12025 case VMX_EXIT_VMPTRST:
12026 case VMX_EXIT_VMREAD:
12027 case VMX_EXIT_VMRESUME:
12028 case VMX_EXIT_VMWRITE:
12029 case VMX_EXIT_VMXOFF:
12030 case VMX_EXIT_VMXON:
12031 case VMX_EXIT_MOV_CRX:
12032 case VMX_EXIT_MOV_DRX:
12033 case VMX_EXIT_IO_INSTR:
12034 case VMX_EXIT_RDMSR:
12035 case VMX_EXIT_WRMSR:
12036 case VMX_EXIT_MWAIT:
12037 case VMX_EXIT_MONITOR:
12038 case VMX_EXIT_PAUSE:
12039 case VMX_EXIT_GDTR_IDTR_ACCESS:
12040 case VMX_EXIT_LDTR_TR_ACCESS:
12041 case VMX_EXIT_INVEPT:
12042 case VMX_EXIT_RDTSCP:
12043 case VMX_EXIT_INVVPID:
12044 case VMX_EXIT_WBINVD:
12045 case VMX_EXIT_XSETBV:
12046 case VMX_EXIT_RDRAND:
12047 case VMX_EXIT_INVPCID:
12048 case VMX_EXIT_VMFUNC:
12049 case VMX_EXIT_RDSEED:
12050 case VMX_EXIT_XSAVES:
12051 case VMX_EXIT_XRSTORS:
12052 {
12053 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12054 AssertRCReturn(rc, rc);
12055 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12056 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12057 return VINF_EM_DBG_STEPPED;
12058 break;
12059 }
12060
12061 /* Errors and unexpected events: */
12062 case VMX_EXIT_INIT_SIGNAL:
12063 case VMX_EXIT_SIPI:
12064 case VMX_EXIT_IO_SMI:
12065 case VMX_EXIT_SMI:
12066 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12067 case VMX_EXIT_ERR_MSR_LOAD:
12068 case VMX_EXIT_ERR_MACHINE_CHECK:
12069 case VMX_EXIT_PML_FULL:
12070 case VMX_EXIT_VIRTUALIZED_EOI:
12071 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12072 break;
12073
12074 default:
12075 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12076 break;
12077 }
12078 }
12079
12080 /*
12081 * Check for debugger event breakpoints and dtrace probes.
12082 */
12083 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12084 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12085 {
12086 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12087 if (rcStrict != VINF_SUCCESS)
12088 return rcStrict;
12089 }
12090
12091 /*
12092 * Normal processing.
12093 */
12094#ifdef HMVMX_USE_FUNCTION_TABLE
12095 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12096#else
12097 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12098#endif
12099}
12100
12101/** @} */