VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@98026

Last change on this file since 98026 was 97777, checked in by vboxsync, 2 years ago

VMM/HMR0VMX: Corrected the HMVMX_CHECK_BREAK macro - it wasn't really breaking out of the pseudo-do-while-loop. ticketref:21332

1/* $Id: VMXAllTemplate.cpp.h 97777 2022-12-12 13:14:28Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP), which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
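/*
 * Illustrative sketch (hypothetical helper, not taken from the surrounding code):
 * the assertion above is meant to guard any direct access to guest-CPU state,
 * i.e. the corresponding CPUMCTX_EXTRN_XXX bit must be clear (state already
 * imported from the VMCS) before the field is read.
 */
#if 0 /* example only, not compiled */
static uint64_t vmxHCExampleReadGuestCr0(PVMCPUCC pVCpu)
{
    /* CR0 must already have been imported from the VMCS before reading it here. */
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    return pVCpu->cpum.GstCtx.cr0;
}
#endif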
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0)
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields. */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
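/*
 * Illustrative sketch (hypothetical wrapper name): when HMVMX_USE_FUNCTION_TABLE
 * is defined, the dispatcher simply indexes the table above with the basic
 * VM-exit reason and tail-calls the handler.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC vmxHCExampleDispatch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
{
    Assert(uExitReason <= VMX_EXIT_MAX);    /* Callers pass the basic exit reason, not the full 32-bit exit-reason field. */
    return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
}
#endif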
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
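/*
 * Illustrative sketch (hypothetical helper; assumes a strict build with logging
 * so that g_apszVmxInstrErrors is available): how the string table above would
 * typically be used to log a failed VMX instruction.
 */
#if 0 /* example only, not compiled */
static void vmxHCExampleLogInstrError(PVMCPUCC pVCpu)
{
    uint32_t uInstrError = 0;
    int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
    AssertRC(rc);
    /* Clamp so that unknown/new error numbers cannot index past the table. */
    uint32_t const idxError = RT_MIN(uInstrError, (uint32_t)HMVMX_INSTR_ERROR_MAX);
    Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxError]));
}
#endif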
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Guest modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 return ( X86_CR0_PE
737 | X86_CR0_NE
738 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
739 | X86_CR0_PG
740 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
741}
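/*
 * Illustrative sketch (hypothetical helper; VMX_VMCS_WRITE_NW is assumed to be
 * provided by the including code alongside the 16/32/64-bit write macros): the
 * mask returned above is what gets committed to the CR0 guest/host mask field,
 * so that guest writes to any of those bits cause a VM-exit.
 */
#if 0 /* example only, not compiled */
static void vmxHCExampleCommitCr0Mask(PVMCPUCC pVCpu)
{
    uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
    AssertRC(rc);
}
#endif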
742
743
744/**
745 * Gets the CR4 guest/host mask.
746 *
747 * These bits typically do not change through the lifetime of a VM. Any bit set in
748 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
749 * by the guest.
750 *
751 * @returns The CR4 guest/host mask.
752 * @param pVCpu The cross context virtual CPU structure.
753 */
754static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
755{
756 /*
757 * We construct a mask of all CR4 bits that the guest can modify without causing
758 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
759 * a VM-exit when the guest attempts to modify them when executing using
760 * hardware-assisted VMX.
761 *
762 * When a feature is not exposed to the guest (and may be present on the host),
763 * we want to intercept guest modifications to the bit so we can emulate proper
764 * behavior (e.g., #GP).
765 *
766 * Furthermore, only modifications to those bits that don't require immediate
767 * emulation are allowed. For example, PCIDE is excluded because the behavior
768 * depends on CR3 which might not always be the guest value while executing
769 * using hardware-assisted VMX.
770 */
771 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
772 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
773#ifdef IN_NEM_DARWIN
774 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
775#endif
776 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
777
778 /*
779 * Paranoia.
780 * Ensure features exposed to the guest are present on the host.
781 */
782 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
783#ifdef IN_NEM_DARWIN
784 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
785#endif
786 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
787
788 uint64_t const fGstMask = X86_CR4_PVI
789 | X86_CR4_TSD
790 | X86_CR4_DE
791 | X86_CR4_MCE
792 | X86_CR4_PCE
793 | X86_CR4_OSXMMEEXCPT
794 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
795#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
796 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
797 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
798#endif
799 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
800 return ~fGstMask;
801}
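/*
 * Illustrative sketch (hypothetical helper): since the function above returns
 * the inverted guest-modifiable mask, testing whether a particular CR4 bit is
 * host-owned (i.e. traps on guest writes) is a simple AND.
 */
#if 0 /* example only, not compiled */
static bool vmxHCExampleIsCr4BitIntercepted(PVMCPUCC pVCpu, uint64_t fCr4Bit)
{
    /* E.g. X86_CR4_VMXE is never part of fGstMask, so this returns true for it. */
    return RT_BOOL(vmxHCGetFixedCr4Mask(pVCpu) & fCr4Bit);
}
#endif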
802
803
804/**
805 * Adds one or more exceptions to the exception bitmap and commits it to the current
806 * VMCS.
807 *
808 * @param pVCpu The cross context virtual CPU structure.
809 * @param pVmxTransient The VMX-transient structure.
810 * @param uXcptMask The exception(s) to add.
811 */
812static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
813{
814 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
815 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
816 if ((uXcptBitmap & uXcptMask) != uXcptMask)
817 {
818 uXcptBitmap |= uXcptMask;
819 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
820 AssertRC(rc);
821 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
822 }
823}
824
825
826/**
827 * Adds an exception to the exception bitmap and commits it to the current VMCS.
828 *
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param pVmxTransient The VMX-transient structure.
831 * @param uXcpt The exception to add.
832 */
833static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
834{
835 Assert(uXcpt <= X86_XCPT_LAST);
836 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
837}
838
839
840/**
841 * Removes one or more exceptions from the exception bitmap and commits it to the
842 * current VMCS.
843 *
844 * This takes care of not removing the exception intercept if a nested-guest
845 * requires the exception to be intercepted.
846 *
847 * @returns VBox status code.
848 * @param pVCpu The cross context virtual CPU structure.
849 * @param pVmxTransient The VMX-transient structure.
850 * @param uXcptMask The exception(s) to remove.
851 */
852static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
853{
854 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
855 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
856 if (uXcptBitmap & uXcptMask)
857 {
858#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
859 if (!pVmxTransient->fIsNestedGuest)
860 { /* likely */ }
861 else
862 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
863#endif
864#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
865 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
866 | RT_BIT(X86_XCPT_DE)
867 | RT_BIT(X86_XCPT_NM)
868 | RT_BIT(X86_XCPT_TS)
869 | RT_BIT(X86_XCPT_UD)
870 | RT_BIT(X86_XCPT_NP)
871 | RT_BIT(X86_XCPT_SS)
872 | RT_BIT(X86_XCPT_GP)
873 | RT_BIT(X86_XCPT_PF)
874 | RT_BIT(X86_XCPT_MF));
875#elif defined(HMVMX_ALWAYS_TRAP_PF)
876 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
877#endif
878 if (uXcptMask)
879 {
880 /* Validate we are not removing any essential exception intercepts. */
881#ifndef IN_NEM_DARWIN
882 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
883#else
884 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
885#endif
886 NOREF(pVCpu);
887 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
889
890 /* Remove it from the exception bitmap. */
891 uXcptBitmap &= ~uXcptMask;
892
893 /* Commit and update the cache if necessary. */
894 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
895 {
896 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
897 AssertRC(rc);
898 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
899 }
900 }
901 }
902 return VINF_SUCCESS;
903}
904
905
906/**
907 * Removes an exception from the exception bitmap and commits it to the current
908 * VMCS.
909 *
910 * @returns VBox status code.
911 * @param pVCpu The cross context virtual CPU structure.
912 * @param pVmxTransient The VMX-transient structure.
913 * @param uXcpt The exception to remove.
914 */
915static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
916{
917 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
918}
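/*
 * Illustrative sketch (hypothetical helper): typical pairing of the add/remove
 * intercept helpers above, e.g. temporarily trapping #GP; the removal path
 * takes care of nested-guest and debug-build overrides.
 */
#if 0 /* example only, not compiled */
static int vmxHCExampleTrapGpTemporarily(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    /* ... do whatever required the #GP intercept ... */
    return vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
}
#endif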
919
920#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
921
922/**
923 * Loads the shadow VMCS specified by the VMCS info. object.
924 *
925 * @returns VBox status code.
926 * @param pVmcsInfo The VMCS info. object.
927 *
928 * @remarks Can be called with interrupts disabled.
929 */
930static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
931{
932 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
933 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
934
935 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
936 if (RT_SUCCESS(rc))
937 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
938 return rc;
939}
940
941
942/**
943 * Clears the shadow VMCS specified by the VMCS info. object.
944 *
945 * @returns VBox status code.
946 * @param pVmcsInfo The VMCS info. object.
947 *
948 * @remarks Can be called with interrupts disabled.
949 */
950static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
951{
952 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
953 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
954
955 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
956 if (RT_SUCCESS(rc))
957 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
958 return rc;
959}
960
961
962/**
963 * Switches from and to the specified VMCSes.
964 *
965 * @returns VBox status code.
966 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
967 * @param pVmcsInfoTo The VMCS info. object we are switching to.
968 *
969 * @remarks Called with interrupts disabled.
970 */
971static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
972{
973 /*
974 * Clear the VMCS we are switching out if it has not already been cleared.
975 * This will sync any CPU internal data back to the VMCS.
976 */
977 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
978 {
979 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
980 if (RT_SUCCESS(rc))
981 {
982 /*
983 * The shadow VMCS, if any, would not be active at this point since we
984 * would have cleared it while importing the virtual hardware-virtualization
985 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
986 * clear the shadow VMCS here, just assert for safety.
987 */
988 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
989 }
990 else
991 return rc;
992 }
993
994 /*
995 * Clear the VMCS we are switching to if it has not already been cleared.
996 * This will initialize the VMCS launch state to "clear" required for loading it.
997 *
998 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
999 */
1000 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1001 {
1002 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1003 if (RT_SUCCESS(rc))
1004 { /* likely */ }
1005 else
1006 return rc;
1007 }
1008
1009 /*
1010 * Finally, load the VMCS we are switching to.
1011 */
1012 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1013}
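/*
 * Illustrative sketch (hypothetical helper and variables): the clear-before-load
 * ordering implemented above boils down to this raw sequence for a single VMCS
 * (see Intel spec. 31.6); the wrappers used above additionally keep the cached
 * launch state up to date.
 */
#if 0 /* example only, not compiled */
static int vmxHCExampleSwitchRawVmcs(RTHCPHYS HCPhysVmcsFrom, RTHCPHYS HCPhysVmcsTo)
{
    int rc = VMXClearVmcs(HCPhysVmcsFrom);  /* Syncs CPU-internal state back to the VMCS region and marks it "clear". */
    if (RT_SUCCESS(rc))
        rc = VMXLoadVmcs(HCPhysVmcsTo);     /* Makes the destination VMCS current and active on this CPU. */
    return rc;
}
#endif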
1014
1015
1016/**
1017 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1018 * caller.
1019 *
1020 * @returns VBox status code.
1021 * @param pVCpu The cross context virtual CPU structure.
1022 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1023 * true) or guest VMCS (pass false).
1024 */
1025static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1026{
1027 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1028 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1029
1030 PVMXVMCSINFO pVmcsInfoFrom;
1031 PVMXVMCSINFO pVmcsInfoTo;
1032 if (fSwitchToNstGstVmcs)
1033 {
1034 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1035 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1036 }
1037 else
1038 {
1039 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1040 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1041 }
1042
1043 /*
1044 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1045 * preemption hook code path acquires the current VMCS.
1046 */
1047 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1048
1049 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1050 if (RT_SUCCESS(rc))
1051 {
1052 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1053 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1054
1055 /*
1056 * If we are switching to a VMCS that was executed on a different host CPU or was
1057 * never executed before, flag that we need to export the host state before executing
1058 * guest/nested-guest code using hardware-assisted VMX.
1059 *
1060 * This could probably be done in a preemptible context since the preemption hook
1061 * will flag the necessary change in host context. However, since preemption is
1062 * already disabled and to avoid making assumptions about host specific code in
1063 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1064 * disabled.
1065 */
1066 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1067 { /* likely */ }
1068 else
1069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1070
1071 ASMSetFlags(fEFlags);
1072
1073 /*
1074 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1075 * flag that we need to update the host MSR values there. Even if we decide in the
1076 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1077 * if its content differs, we would have to update the host MSRs anyway.
1078 */
1079 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1080 }
1081 else
1082 ASMSetFlags(fEFlags);
1083 return rc;
1084}
1085
1086#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1087#ifdef VBOX_STRICT
1088
1089/**
1090 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1091 * transient structure.
1092 *
1093 * @param pVCpu The cross context virtual CPU structure.
1094 * @param pVmxTransient The VMX-transient structure.
1095 */
1096DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1097{
1098 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1099 AssertRC(rc);
1100}
1101
1102
1103/**
1104 * Reads the VM-entry exception error code field from the VMCS into
1105 * the VMX transient structure.
1106 *
1107 * @param pVCpu The cross context virtual CPU structure.
1108 * @param pVmxTransient The VMX-transient structure.
1109 */
1110DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1111{
1112 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1113 AssertRC(rc);
1114}
1115
1116
1117/**
1118 * Reads the VM-entry instruction length field from the VMCS into
1119 * the VMX transient structure.
1120 *
1121 * @param pVCpu The cross context virtual CPU structure.
1122 * @param pVmxTransient The VMX-transient structure.
1123 */
1124DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1125{
1126 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1127 AssertRC(rc);
1128}
1129
1130#endif /* VBOX_STRICT */
1131
1132
1133/**
1134 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1135 *
1136 * Don't call directly unless it's likely that some or all of the fields
1137 * given in @a a_fReadMask have already been read.
1138 *
1139 * @tparam a_fReadMask The fields to read.
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pVmxTransient The VMX-transient structure.
1142 */
1143template<uint32_t const a_fReadMask>
1144static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1147 | HMVMX_READ_EXIT_INSTR_LEN
1148 | HMVMX_READ_EXIT_INSTR_INFO
1149 | HMVMX_READ_IDT_VECTORING_INFO
1150 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1151 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1152 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1153 | HMVMX_READ_GUEST_LINEAR_ADDR
1154 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1155 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1156 )) == 0);
1157
1158 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1159 {
1160 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1161
1162 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1163 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1164 {
1165 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1166 AssertRC(rc);
1167 }
1168 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1169 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1170 {
1171 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1172 AssertRC(rc);
1173 }
1174 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1175 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1176 {
1177 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1178 AssertRC(rc);
1179 }
1180 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1181 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1182 {
1183 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1184 AssertRC(rc);
1185 }
1186 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1187 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1188 {
1189 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1190 AssertRC(rc);
1191 }
1192 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1193 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1194 {
1195 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1196 AssertRC(rc);
1197 }
1198 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1199 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1200 {
1201 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1202 AssertRC(rc);
1203 }
1204 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1205 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1206 {
1207 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1208 AssertRC(rc);
1209 }
1210 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1211 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1212 {
1213 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1214 AssertRC(rc);
1215 }
1216 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1217 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1218 {
1219 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1220 AssertRC(rc);
1221 }
1222
1223 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1224 }
1225}
1226
1227
1228/**
1229 * Reads VMCS fields into the VMXTRANSIENT structure.
1230 *
1231 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1232 * generating an optimized read sequence w/o any conditionals in
1233 * non-strict builds.
1234 *
1235 * @tparam a_fReadMask The fields to read. One or more of the
1236 * HMVMX_READ_XXX fields ORed together.
1237 * @param pVCpu The cross context virtual CPU structure.
1238 * @param pVmxTransient The VMX-transient structure.
1239 */
1240template<uint32_t const a_fReadMask>
1241DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1242{
1243 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1244 | HMVMX_READ_EXIT_INSTR_LEN
1245 | HMVMX_READ_EXIT_INSTR_INFO
1246 | HMVMX_READ_IDT_VECTORING_INFO
1247 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1248 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1249 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1250 | HMVMX_READ_GUEST_LINEAR_ADDR
1251 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1252 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1253 )) == 0);
1254
1255 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1256 {
1257 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1258 {
1259 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1260 AssertRC(rc);
1261 }
1262 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1263 {
1264 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1265 AssertRC(rc);
1266 }
1267 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1268 {
1269 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1270 AssertRC(rc);
1271 }
1272 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1273 {
1274 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1275 AssertRC(rc);
1276 }
1277 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1278 {
1279 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1280 AssertRC(rc);
1281 }
1282 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1283 {
1284 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1285 AssertRC(rc);
1286 }
1287 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1288 {
1289 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1290 AssertRC(rc);
1291 }
1292 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1293 {
1294 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1295 AssertRC(rc);
1296 }
1297 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1298 {
1299 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1300 AssertRC(rc);
1301 }
1302 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1303 {
1304 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1305 AssertRC(rc);
1306 }
1307
1308 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1309 }
1310 else
1311 {
1312 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1313 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1314 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1315 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1316 }
1317}
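/*
 * Illustrative sketch (hypothetical handler prolog): the intended usage pattern
 * is to request all needed fields once via the template above and then rely on
 * HMVMX_ASSERT_READ before consuming them.
 */
#if 0 /* example only, not compiled */
static void vmxHCExampleExitHandlerProlog(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
    Log4Func(("uExitQual=%#RX64 cbExitInstr=%u\n", pVmxTransient->uExitQual, pVmxTransient->cbExitInstr));
}
#endif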
1318
1319
1320#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1321/**
1322 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1323 *
1324 * @param pVCpu The cross context virtual CPU structure.
1325 * @param pVmxTransient The VMX-transient structure.
1326 */
1327static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1328{
1329 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1330 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1336 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1337 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1338 AssertRC(rc);
1339 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1340 | HMVMX_READ_EXIT_INSTR_LEN
1341 | HMVMX_READ_EXIT_INSTR_INFO
1342 | HMVMX_READ_IDT_VECTORING_INFO
1343 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1344 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1345 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1346 | HMVMX_READ_GUEST_LINEAR_ADDR
1347 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1348}
1349#endif
1350
1351/**
1352 * Verifies that our cached values of the VMCS fields are all consistent with
1353 * what's actually present in the VMCS.
1354 *
1355 * @returns VBox status code.
1356 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1357 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1358 * VMCS content. HMCPU error-field is
1359 * updated, see VMX_VCI_XXX.
1360 * @param pVCpu The cross context virtual CPU structure.
1361 * @param pVmcsInfo The VMCS info. object.
1362 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1363 */
1364static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1365{
1366 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1367
1368 uint32_t u32Val;
1369 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1372 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1379 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1386 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1391 AssertRC(rc);
1392 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1393 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1394 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1395 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1396
1397 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1398 {
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1402 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405 }
1406
1407 uint64_t u64Val;
1408 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1409 {
1410 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1411 AssertRC(rc);
1412 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1413 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1414 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1415 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1416 }
1417
1418 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1421 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1426 AssertRC(rc);
1427 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1428 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1429 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1430 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1431
1432 NOREF(pcszVmcs);
1433 return VINF_SUCCESS;
1434}
1435
1436
1437/**
1438 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1439 * VMCS.
1440 *
1441 * This is typically required when the guest changes paging mode.
1442 *
1443 * @returns VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure.
1445 * @param pVmxTransient The VMX-transient structure.
1446 *
1447 * @remarks Requires EFER.
1448 * @remarks No-long-jump zone!!!
1449 */
1450static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1451{
1452 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1453 {
1454 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1455 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1456
1457 /*
1458 * VM-entry controls.
1459 */
1460 {
1461 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1462 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
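            /* Illustrative note (not part of the original source): allowed0 has a bit set
               whenever that control must be 1, while allowed1 has a bit clear whenever that
               control must be 0.  With hypothetical values allowed0=0x000011ff and
               allowed1=0x0003ffff, a final value fVal is acceptable only if
                   (fVal & 0x000011ff) == 0x000011ff    (all must-be-one bits are set)
                   (fVal & ~0x0003ffff) == 0            (no must-be-zero bit is set)
               which is what the "(fVal & fZap) == fVal" check further down verifies, given
               that fVal starts out as allowed0. */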
1463
1464 /*
1465 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1466 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1467 *
1468 * For nested-guests, this is a mandatory VM-entry control. It's also
1469 * required because we do not want to leak host bits to the nested-guest.
1470 */
1471 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1472
1473 /*
1474 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1475 *
1476             * For nested-guests, we initialize the "IA-32e mode guest" control with whatever is
1477             * required to get the nested-guest working with hardware-assisted VMX execution.
1478             * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1479             * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1480             * here rather than while merging the guest VMCS controls.
1481 */
1482 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1483 {
1484 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1485 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1486 }
1487 else
1488 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1489
1490 /*
1491 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1492 *
1493 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1494 * regardless of whether the nested-guest VMCS specifies it because we are free to
1495 * load whatever MSRs we require and we do not need to modify the guest visible copy
1496 * of the VM-entry MSR load area.
1497 */
1498 if ( g_fHmVmxSupportsVmcsEfer
1499#ifndef IN_NEM_DARWIN
1500 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1501#endif
1502 )
1503 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1504 else
1505 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1506
1507 /*
1508 * The following should -not- be set (since we're not in SMM mode):
1509 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1510 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1511 */
1512
1513 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1514 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1515
1516 if ((fVal & fZap) == fVal)
1517 { /* likely */ }
1518 else
1519 {
1520 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1521 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1522 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1523 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1524 }
1525
1526 /* Commit it to the VMCS. */
1527 if (pVmcsInfo->u32EntryCtls != fVal)
1528 {
1529 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1530 AssertRC(rc);
1531 pVmcsInfo->u32EntryCtls = fVal;
1532 }
1533 }
1534
1535 /*
1536 * VM-exit controls.
1537 */
1538 {
1539 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1540 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1541
1542 /*
1543 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1544 * supported the 1-setting of this bit.
1545 *
1546             * For nested-guests, we set the "save debug controls" control since its converse,
1547             * the "load debug controls" control, is mandatory for nested-guests anyway.
1548 */
1549 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1550
1551 /*
1552 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1553 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1554 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1555 * vmxHCExportHostMsrs().
1556 *
1557 * For nested-guests, we always set this bit as we do not support 32-bit
1558 * hosts.
1559 */
1560 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1561
1562#ifndef IN_NEM_DARWIN
1563 /*
1564 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1565 *
1566 * For nested-guests, we should use the "save IA32_EFER" control if we also
1567 * used the "load IA32_EFER" control while exporting VM-entry controls.
1568 */
1569 if ( g_fHmVmxSupportsVmcsEfer
1570 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1571 {
1572 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1573 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1574 }
1575#endif
1576
1577 /*
1578 * Enable saving of the VMX-preemption timer value on VM-exit.
1579 * For nested-guests, currently not exposed/used.
1580 */
1581 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1582 * the timer value. */
1583 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1584 {
1585 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1586 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1587 }
1588
1589 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1590 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1591
1592 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1593 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1594 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1595
1596 if ((fVal & fZap) == fVal)
1597 { /* likely */ }
1598 else
1599 {
1600 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1601 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1602 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1603 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1604 }
1605
1606 /* Commit it to the VMCS. */
1607 if (pVmcsInfo->u32ExitCtls != fVal)
1608 {
1609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1610 AssertRC(rc);
1611 pVmcsInfo->u32ExitCtls = fVal;
1612 }
1613 }
1614
1615 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1616 }
1617 return VINF_SUCCESS;
1618}
1619
1620
1621/**
1622 * Sets the TPR threshold in the VMCS.
1623 *
1624 * @param pVCpu The cross context virtual CPU structure.
1625 * @param pVmcsInfo The VMCS info. object.
1626 * @param u32TprThreshold The TPR threshold (task-priority class only).
1627 */
1628DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1629{
1630 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1631 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1632 RT_NOREF(pVmcsInfo);
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1634 AssertRC(rc);
1635}
1636
1637
1638/**
1639 * Exports the guest APIC TPR state into the VMCS.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure.
1642 * @param pVmxTransient The VMX-transient structure.
1643 *
1644 * @remarks No-long-jump zone!!!
1645 */
1646static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1647{
1648 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1649 {
1650 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1651
1652 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1653 if (!pVmxTransient->fIsNestedGuest)
1654 {
1655 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1656 && APICIsEnabled(pVCpu))
1657 {
1658 /*
1659 * Setup TPR shadowing.
1660 */
1661 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1662 {
1663 bool fPendingIntr = false;
1664 uint8_t u8Tpr = 0;
1665 uint8_t u8PendingIntr = 0;
1666 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1667 AssertRC(rc);
1668
1669 /*
1670 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1671 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1672 * priority of the pending interrupt so we can deliver the interrupt. If there
1673 * are no interrupts pending, set threshold to 0 to not cause any
1674 * TPR-below-threshold VM-exits.
1675 */
1676 uint32_t u32TprThreshold = 0;
1677 if (fPendingIntr)
1678 {
1679 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1680 (which is the Task-Priority Class). */
1681 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1682 const uint8_t u8TprPriority = u8Tpr >> 4;
1683 if (u8PendingPriority <= u8TprPriority)
1684 u32TprThreshold = u8PendingPriority;
1685 }
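                    /* Illustrative example (made-up values): if the highest pending vector is
                       0x51 its priority class is 5, and if the guest TPR is 0x62 its class is 6.
                       Since 5 <= 6 the interrupt is currently masked, so we program a threshold
                       of 5 and VT-x raises a TPR-below-threshold VM-exit once the guest lowers
                       its TPR below 0x50, at which point the interrupt can be delivered. */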
1686
1687 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1688 }
1689 }
1690 }
1691 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1692 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1693 }
1694}
1695
1696
1697/**
1698 * Gets the guest interruptibility-state and updates related force-flags.
1699 *
1700 * @returns Guest's interruptibility-state.
1701 * @param pVCpu The cross context virtual CPU structure.
1702 *
1703 * @remarks No-long-jump zone!!!
1704 */
1705static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1706{
1707 uint32_t fIntrState;
1708
1709 /*
1710 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1711 */
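    /* Illustrative note (not from the original source): the STI shadow defers interrupt
       delivery until after the instruction following STI, which is what makes the classic
       "sti; hlt" sequence race-free; MOV SS/POP SS similarly shadows the next instruction
       so that an SS:SP update pair isn't torn by an interrupt. */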
1712 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1713 fIntrState = 0;
1714 else
1715 {
1716 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1717 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1718
1719 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1720 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1721 else
1722 {
1723 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1724
1725 /* Block-by-STI must not be set when interrupts are disabled. */
1726 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1727 }
1728 }
1729
1730 /*
1731 * Check if we should inhibit NMI delivery.
1732 */
1733 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1734 { /* likely */ }
1735 else
1736 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1737
1738 /*
1739 * Validate.
1740 */
1741 /* We don't support block-by-SMI yet.*/
1742 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1743
1744 return fIntrState;
1745}
1746
1747
1748/**
1749 * Exports the exception intercepts required for guest execution in the VMCS.
1750 *
1751 * @param pVCpu The cross context virtual CPU structure.
1752 * @param pVmxTransient The VMX-transient structure.
1753 *
1754 * @remarks No-long-jump zone!!!
1755 */
1756static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1757{
1758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1759 {
1760 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1761 if ( !pVmxTransient->fIsNestedGuest
1762 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1763 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1764 else
1765 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1766
1767 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1768 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1769 }
1770}
1771
1772
1773/**
1774 * Exports the guest's RIP into the guest-state area in the VMCS.
1775 *
1776 * @param pVCpu The cross context virtual CPU structure.
1777 *
1778 * @remarks No-long-jump zone!!!
1779 */
1780static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1781{
1782 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1783 {
1784 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1785
1786 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1787 AssertRC(rc);
1788
1789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1790 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1791 }
1792}
1793
1794
1795/**
1796 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmxTransient The VMX-transient structure.
1800 *
1801 * @remarks No-long-jump zone!!!
1802 */
1803static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1804{
1805 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1806 {
1807 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1808
1809 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1810 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1811 to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1812 Use 32-bit VMWRITE. */
1813 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1814 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1815 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1816
1817#ifndef IN_NEM_DARWIN
1818 /*
1819 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1820 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1821 * can run the real-mode guest code under Virtual 8086 mode.
1822 */
1823 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1824 if (pVmcsInfo->RealMode.fRealOnV86Active)
1825 {
1826 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1827 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1828 Assert(!pVmxTransient->fIsNestedGuest);
1829 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1830 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1831 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1832 }
1833#else
1834 RT_NOREF(pVmxTransient);
1835#endif
1836
1837 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1838 AssertRC(rc);
1839
1840 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1841 Log4Func(("eflags=%#RX32\n", fEFlags));
1842 }
1843}
1844
1845
1846#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1847/**
1848 * Copies the nested-guest VMCS to the shadow VMCS.
1849 *
1850 * @returns VBox status code.
1851 * @param pVCpu The cross context virtual CPU structure.
1852 * @param pVmcsInfo The VMCS info. object.
1853 *
1854 * @remarks No-long-jump zone!!!
1855 */
1856static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1857{
1858 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1859 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1860
1861 /*
1862 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1863 * current VMCS, as we may try saving guest lazy MSRs.
1864 *
1865 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1866     * calling the VMCS import code, which currently performs the guest MSR reads
1867     * (on 64-bit hosts) and accesses the auto-load/store MSR area (on 32-bit hosts),
1868     * as well as the rest of the VMX leave-session machinery.
1869 */
1870 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1871
1872 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1873 if (RT_SUCCESS(rc))
1874 {
1875 /*
1876 * Copy all guest read/write VMCS fields.
1877 *
1878 * We don't check for VMWRITE failures here for performance reasons and
1879 * because they are not expected to fail, barring irrecoverable conditions
1880 * like hardware errors.
1881 */
1882 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1883 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1884 {
1885 uint64_t u64Val;
1886 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1887 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1888 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1889 }
1890
1891 /*
1892 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1893 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1894 */
1895 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1896 {
1897 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1898 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1899 {
1900 uint64_t u64Val;
1901 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1902 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1903 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1904 }
1905 }
1906
1907 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1908 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1909 }
1910
1911 ASMSetFlags(fEFlags);
1912 return rc;
1913}
1914
1915
1916/**
1917 * Copies the shadow VMCS to the nested-guest VMCS.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param pVmcsInfo The VMCS info. object.
1922 *
1923 * @remarks Called with interrupts disabled.
1924 */
1925static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1926{
1927 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1928 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1929 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1930
1931 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1932 if (RT_SUCCESS(rc))
1933 {
1934 /*
1935 * Copy guest read/write fields from the shadow VMCS.
1936 * Guest read-only fields cannot be modified, so no need to copy them.
1937 *
1938 * We don't check for VMREAD failures here for performance reasons and
1939 * because they are not expected to fail, barring irrecoverable conditions
1940 * like hardware errors.
1941 */
1942 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1943 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1944 {
1945 uint64_t u64Val;
1946 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1947 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1948 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1949 }
1950
1951 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1952 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1953 }
1954 return rc;
1955}
1956
1957
1958/**
1959 * Enables VMCS shadowing for the given VMCS info. object.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure.
1962 * @param pVmcsInfo The VMCS info. object.
1963 *
1964 * @remarks No-long-jump zone!!!
1965 */
1966static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1967{
1968 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1969 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1970 {
1971 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1972 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1973 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1974 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1975 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1976 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1977 Log4Func(("Enabled\n"));
1978 }
1979}
1980
1981
1982/**
1983 * Disables VMCS shadowing for the given VMCS info. object.
1984 *
1985 * @param pVCpu The cross context virtual CPU structure.
1986 * @param pVmcsInfo The VMCS info. object.
1987 *
1988 * @remarks No-long-jump zone!!!
1989 */
1990static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1991{
1992 /*
1993 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1994 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1995 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1996 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1997 *
1998 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1999 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2000 */
2001 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2002 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2003 {
2004 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2005 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2006 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2007 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2008 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2009 Log4Func(("Disabled\n"));
2010 }
2011}
2012#endif
2013
2014
2015/**
2016 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2017 *
2018 * The guest FPU state is always pre-loaded hence we don't need to bother about
2019 * sharing FPU related CR0 bits between the guest and host.
2020 *
2021 * @returns VBox status code.
2022 * @param pVCpu The cross context virtual CPU structure.
2023 * @param pVmxTransient The VMX-transient structure.
2024 *
2025 * @remarks No-long-jump zone!!!
2026 */
2027static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2028{
2029 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2030 {
2031 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2032 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2033
2034 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2035 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2036 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2037 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2038 else
2039 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2040
2041 if (!pVmxTransient->fIsNestedGuest)
2042 {
2043 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2044 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2045 uint64_t const u64ShadowCr0 = u64GuestCr0;
2046 Assert(!RT_HI_U32(u64GuestCr0));
2047
2048 /*
2049 * Setup VT-x's view of the guest CR0.
2050 */
2051 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2052 if (VM_IS_VMX_NESTED_PAGING(pVM))
2053 {
2054#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2055 if (CPUMIsGuestPagingEnabled(pVCpu))
2056 {
2057 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2058 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2059 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2060 }
2061 else
2062 {
2063 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2064 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2065 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2066 }
2067
2068 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2069 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2070 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2071#endif
2072 }
2073 else
2074 {
2075 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2076 u64GuestCr0 |= X86_CR0_WP;
2077 }
2078
2079 /*
2080 * Guest FPU bits.
2081 *
2082 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2083 * using CR0.TS.
2084 *
2085             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always
2086             * be set on the first CPUs to support VT-x; there is no mention of it with regards to UX in the VM-entry checks.
2087 */
2088 u64GuestCr0 |= X86_CR0_NE;
2089
2090 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2091 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2092
2093 /*
2094 * Update exception intercepts.
2095 */
2096 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2097#ifndef IN_NEM_DARWIN
2098 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2099 {
2100 Assert(PDMVmmDevHeapIsEnabled(pVM));
2101 Assert(pVM->hm.s.vmx.pRealModeTSS);
2102 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2103 }
2104 else
2105#endif
2106 {
2107 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2108 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2109 if (fInterceptMF)
2110 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2111 }
2112
2113 /* Additional intercepts for debugging, define these yourself explicitly. */
2114#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2115 uXcptBitmap |= 0
2116 | RT_BIT(X86_XCPT_BP)
2117 | RT_BIT(X86_XCPT_DE)
2118 | RT_BIT(X86_XCPT_NM)
2119 | RT_BIT(X86_XCPT_TS)
2120 | RT_BIT(X86_XCPT_UD)
2121 | RT_BIT(X86_XCPT_NP)
2122 | RT_BIT(X86_XCPT_SS)
2123 | RT_BIT(X86_XCPT_GP)
2124 | RT_BIT(X86_XCPT_PF)
2125 | RT_BIT(X86_XCPT_MF)
2126 ;
2127#elif defined(HMVMX_ALWAYS_TRAP_PF)
2128 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2129#endif
2130 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2131 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2132 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2134 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2135
2136 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2137 u64GuestCr0 |= fSetCr0;
2138 u64GuestCr0 &= fZapCr0;
2139 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
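            /* Illustrative note (typical, not guaranteed, values): on most VT-x hosts
               IA32_VMX_CR0_FIXED0 reads 0x80000021 (PG, NE and PE must be 1) and
               IA32_VMX_CR0_FIXED1 reads 0xffffffff (no bit is forced to 0), so without
               unrestricted guest execution the CR0 we load always has PE, NE and PG set;
               with unrestricted guests only NE remains forced, since PE/PG were masked
               out of fSetCr0 above. */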
2140
2141 /* Commit the CR0 and related fields to the guest VMCS. */
2142 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2143 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2144 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2145 {
2146 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2147 AssertRC(rc);
2148 }
2149 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2150 {
2151 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2152 AssertRC(rc);
2153 }
2154
2155 /* Update our caches. */
2156 pVmcsInfo->u32ProcCtls = uProcCtls;
2157 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2158
2159 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2160 }
2161 else
2162 {
2163 /*
2164 * With nested-guests, we may have extended the guest/host mask here since we
2165 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2166 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2167 * originally supplied. We must copy those bits from the nested-guest CR0 into
2168 * the nested-guest CR0 read-shadow.
2169 */
2170 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2171 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2172 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2173 Assert(!RT_HI_U32(u64GuestCr0));
2174 Assert(u64GuestCr0 & X86_CR0_NE);
2175
2176 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2177 u64GuestCr0 |= fSetCr0;
2178 u64GuestCr0 &= fZapCr0;
2179 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2180
2181 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2182 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2183 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2184
2185 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2186 }
2187
2188 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2189 }
2190
2191 return VINF_SUCCESS;
2192}
2193
2194
2195/**
2196 * Exports the guest control registers (CR3, CR4) into the guest-state area
2197 * in the VMCS.
2198 *
2199 * @returns VBox strict status code.
2200 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2201 * without unrestricted guest access and the VMMDev is not presently
2202 * mapped (e.g. EFI32).
2203 *
2204 * @param pVCpu The cross context virtual CPU structure.
2205 * @param pVmxTransient The VMX-transient structure.
2206 *
2207 * @remarks No-long-jump zone!!!
2208 */
2209static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2210{
2211 int rc = VINF_SUCCESS;
2212 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2213
2214 /*
2215 * Guest CR2.
2216 * It's always loaded in the assembler code. Nothing to do here.
2217 */
2218
2219 /*
2220 * Guest CR3.
2221 */
2222 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2223 {
2224 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2225
2226 if (VM_IS_VMX_NESTED_PAGING(pVM))
2227 {
2228#ifndef IN_NEM_DARWIN
2229 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2230 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2231
2232 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2233 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2234 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2235 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2236
2237 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2238 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2239 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2240
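            /* Illustrative example (made-up address): with the EPT PML4 table at host-physical
               0x0000000012345000, the resulting EPTP is 0x000000001234501e, i.e. memory type
               WB (6) in bits 2:0 and page-walk length minus one (3) in bits 5:3, with the
               accessed/dirty enable bit (bit 6) left clear. */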
2241 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2242 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2243 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2244 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2245 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2246 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2247 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2248
2249 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2250 AssertRC(rc);
2251#endif
2252
2253 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2254 uint64_t u64GuestCr3 = pCtx->cr3;
2255 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2256 || CPUMIsGuestPagingEnabledEx(pCtx))
2257 {
2258 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2259 if (CPUMIsGuestInPAEModeEx(pCtx))
2260 {
2261 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2262 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2263 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2264 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2265 }
2266
2267 /*
2268                 * With nested paging, the guest's view of its CR3 is left untouched when the
2269                 * guest is using paging, or when we have unrestricted guest execution to handle
2270                 * the case where it's not using paging.
2271 */
2272 }
2273#ifndef IN_NEM_DARWIN
2274 else
2275 {
2276 /*
2277 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2278 * thinks it accesses physical memory directly, we use our identity-mapped
2279 * page table to map guest-linear to guest-physical addresses. EPT takes care
2280 * of translating it to host-physical addresses.
2281 */
2282 RTGCPHYS GCPhys;
2283 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2284
2285 /* We obtain it here every time as the guest could have relocated this PCI region. */
2286 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2287 if (RT_SUCCESS(rc))
2288 { /* likely */ }
2289 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2290 {
2291 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2292 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2293 }
2294 else
2295 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2296
2297 u64GuestCr3 = GCPhys;
2298 }
2299#endif
2300
2301 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2302 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2303 AssertRC(rc);
2304 }
2305 else
2306 {
2307 Assert(!pVmxTransient->fIsNestedGuest);
2308 /* Non-nested paging case, just use the hypervisor's CR3. */
2309 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2310
2311 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2312 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2313 AssertRC(rc);
2314 }
2315
2316 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2317 }
2318
2319 /*
2320 * Guest CR4.
2321 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2322 */
2323 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2324 {
2325 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2326 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2327
2328 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2329 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2330
2331 /*
2332 * With nested-guests, we may have extended the guest/host mask here (since we
2333 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2334 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2335 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2336 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2337 */
2338 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2339 uint64_t u64GuestCr4 = pCtx->cr4;
2340 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2341 ? pCtx->cr4
2342 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2343 Assert(!RT_HI_U32(u64GuestCr4));
2344
2345#ifndef IN_NEM_DARWIN
2346 /*
2347 * Setup VT-x's view of the guest CR4.
2348 *
2349 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2350 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2351 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2352 *
2353 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2354 */
2355 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2356 {
2357 Assert(pVM->hm.s.vmx.pRealModeTSS);
2358 Assert(PDMVmmDevHeapIsEnabled(pVM));
2359 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2360 }
2361#endif
2362
2363 if (VM_IS_VMX_NESTED_PAGING(pVM))
2364 {
2365 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2366 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2367 {
2368 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2369 u64GuestCr4 |= X86_CR4_PSE;
2370 /* Our identity mapping is a 32-bit page directory. */
2371 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2372 }
2373 /* else use guest CR4.*/
2374 }
2375 else
2376 {
2377 Assert(!pVmxTransient->fIsNestedGuest);
2378
2379 /*
2380 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2381 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2382 */
2383 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2384 {
2385 case PGMMODE_REAL: /* Real-mode. */
2386 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2387 case PGMMODE_32_BIT: /* 32-bit paging. */
2388 {
2389 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2390 break;
2391 }
2392
2393 case PGMMODE_PAE: /* PAE paging. */
2394 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2395 {
2396 u64GuestCr4 |= X86_CR4_PAE;
2397 break;
2398 }
2399
2400 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2401 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2402 {
2403#ifdef VBOX_WITH_64_BITS_GUESTS
2404 /* For our assumption in vmxHCShouldSwapEferMsr. */
2405 Assert(u64GuestCr4 & X86_CR4_PAE);
2406 break;
2407#endif
2408 }
2409 default:
2410 AssertFailed();
2411 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2412 }
2413 }
2414
2415 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2416 u64GuestCr4 |= fSetCr4;
2417 u64GuestCr4 &= fZapCr4;
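        /* Illustrative note (typical, not guaranteed, values): IA32_VMX_CR4_FIXED0 usually
           reads 0x2000, i.e. only CR4.VMXE is forced to 1 while in VMX operation, and
           IA32_VMX_CR4_FIXED1 permits every feature the CPU supports.  So the net effect of
           the two lines above is normally just OR-ing in CR4.VMXE; the guest doesn't notice
           because reads of host-owned CR4 bits are satisfied from the read shadow committed
           below. */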
2418
2419 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2420 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2421 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2422
2423#ifndef IN_NEM_DARWIN
2424 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2425 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2426 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2427 {
2428 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2429 hmR0VmxUpdateStartVmFunction(pVCpu);
2430 }
2431#endif
2432
2433 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2434
2435 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2436 }
2437 return rc;
2438}
2439
2440
2441#ifdef VBOX_STRICT
2442/**
2443 * Strict function to validate segment registers.
2444 *
2445 * @param pVCpu The cross context virtual CPU structure.
2446 * @param pVmcsInfo The VMCS info. object.
2447 *
2448 * @remarks Will import guest CR0 on strict builds during validation of
2449 * segments.
2450 */
2451static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2452{
2453 /*
2454 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2455 *
2456 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2457 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2458 * unusable bit and doesn't change the guest-context value.
2459 */
2460 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2461 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2462 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2463 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2464 && ( !CPUMIsGuestInRealModeEx(pCtx)
2465 && !CPUMIsGuestInV86ModeEx(pCtx)))
2466 {
2467 /* Protected mode checks */
2468 /* CS */
2469 Assert(pCtx->cs.Attr.n.u1Present);
2470 Assert(!(pCtx->cs.Attr.u & 0xf00));
2471 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2472 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2473 || !(pCtx->cs.Attr.n.u1Granularity));
2474 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2475 || (pCtx->cs.Attr.n.u1Granularity));
2476 /* CS cannot be loaded with NULL in protected mode. */
2477 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2478 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2479 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2480 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2481 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2482 else
2483             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2484 /* SS */
2485 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2486 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2487 if ( !(pCtx->cr0 & X86_CR0_PE)
2488 || pCtx->cs.Attr.n.u4Type == 3)
2489 {
2490 Assert(!pCtx->ss.Attr.n.u2Dpl);
2491 }
2492 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2493 {
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2496 Assert(pCtx->ss.Attr.n.u1Present);
2497 Assert(!(pCtx->ss.Attr.u & 0xf00));
2498 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2499 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2500 || !(pCtx->ss.Attr.n.u1Granularity));
2501 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2502 || (pCtx->ss.Attr.n.u1Granularity));
2503 }
2504 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2505 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2506 {
2507 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2508 Assert(pCtx->ds.Attr.n.u1Present);
2509 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2510 Assert(!(pCtx->ds.Attr.u & 0xf00));
2511 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2512 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2513 || !(pCtx->ds.Attr.n.u1Granularity));
2514 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2515 || (pCtx->ds.Attr.n.u1Granularity));
2516 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2517 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2518 }
2519 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2520 {
2521 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2522 Assert(pCtx->es.Attr.n.u1Present);
2523 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2524 Assert(!(pCtx->es.Attr.u & 0xf00));
2525 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2526 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2527 || !(pCtx->es.Attr.n.u1Granularity));
2528 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2529 || (pCtx->es.Attr.n.u1Granularity));
2530 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2531 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2532 }
2533 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2534 {
2535 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2536 Assert(pCtx->fs.Attr.n.u1Present);
2537 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2538 Assert(!(pCtx->fs.Attr.u & 0xf00));
2539 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2540 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2541 || !(pCtx->fs.Attr.n.u1Granularity));
2542 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2543 || (pCtx->fs.Attr.n.u1Granularity));
2544 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2545 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2546 }
2547 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2548 {
2549 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2550 Assert(pCtx->gs.Attr.n.u1Present);
2551 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2552 Assert(!(pCtx->gs.Attr.u & 0xf00));
2553 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2554 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2555 || !(pCtx->gs.Attr.n.u1Granularity));
2556 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2557 || (pCtx->gs.Attr.n.u1Granularity));
2558 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2559 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2560 }
2561 /* 64-bit capable CPUs. */
2562 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2563 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2564 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2565 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2566 }
2567 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2568 || ( CPUMIsGuestInRealModeEx(pCtx)
2569 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2570 {
2571 /* Real and v86 mode checks. */
2572         /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2573 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2574#ifndef IN_NEM_DARWIN
2575 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2576 {
2577 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2578 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2579 }
2580 else
2581#endif
2582 {
2583 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2584 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2585 }
2586
2587 /* CS */
2588 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2589 Assert(pCtx->cs.u32Limit == 0xffff);
2590 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2591 /* SS */
2592 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2593 Assert(pCtx->ss.u32Limit == 0xffff);
2594 Assert(u32SSAttr == 0xf3);
2595 /* DS */
2596 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2597 Assert(pCtx->ds.u32Limit == 0xffff);
2598 Assert(u32DSAttr == 0xf3);
2599 /* ES */
2600 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2601 Assert(pCtx->es.u32Limit == 0xffff);
2602 Assert(u32ESAttr == 0xf3);
2603 /* FS */
2604 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2605 Assert(pCtx->fs.u32Limit == 0xffff);
2606 Assert(u32FSAttr == 0xf3);
2607 /* GS */
2608 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2609 Assert(pCtx->gs.u32Limit == 0xffff);
2610 Assert(u32GSAttr == 0xf3);
2611 /* 64-bit capable CPUs. */
2612 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2613 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2614 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2615 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2616 }
2617}
2618#endif /* VBOX_STRICT */
2619
2620
2621/**
2622 * Exports a guest segment register into the guest-state area in the VMCS.
2623 *
2624 * @returns VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure.
2626 * @param pVmcsInfo The VMCS info. object.
2627 * @param iSegReg The segment register number (X86_SREG_XXX).
2628 * @param pSelReg Pointer to the segment selector.
2629 *
2630 * @remarks No-long-jump zone!!!
2631 */
2632static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2633{
2634 Assert(iSegReg < X86_SREG_COUNT);
2635
2636 uint32_t u32Access = pSelReg->Attr.u;
2637#ifndef IN_NEM_DARWIN
2638 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2639#endif
2640 {
2641 /*
2642         * The way to tell whether this is really a null selector or just a selector that was
2643         * loaded with 0 in real-mode is by looking at the segment attributes. A selector
2644         * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2645         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2646         * NULL selectors loaded in protected-mode have their attributes set to 0.
2647 */
2648 if (u32Access)
2649 { }
2650 else
2651 u32Access = X86DESCATTR_UNUSABLE;
2652 }
2653#ifndef IN_NEM_DARWIN
2654 else
2655 {
2656 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2657 u32Access = 0xf3;
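        /* Illustrative breakdown (not in the original source): 0xf3 decodes as P=1 (bit 7),
           DPL=3 (bits 6:5), S=1 code/data (bit 4) and type=3, i.e. an accessed, read/write
           data segment at privilege level 3 -- exactly what the virtual-8086 monitor expects
           for all segments. */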
2658 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2659 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2660 RT_NOREF_PV(pVCpu);
2661 }
2662#else
2663 RT_NOREF(pVmcsInfo);
2664#endif
2665
2666 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2667 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2668                           ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2669
2670 /*
2671 * Commit it to the VMCS.
2672 */
2673 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2674 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2675 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2676 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2677 return VINF_SUCCESS;
2678}
2679
2680
2681/**
2682 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2683 * area in the VMCS.
2684 *
2685 * @returns VBox status code.
2686 * @param pVCpu The cross context virtual CPU structure.
2687 * @param pVmxTransient The VMX-transient structure.
2688 *
2689 * @remarks Will import guest CR0 on strict builds during validation of
2690 * segments.
2691 * @remarks No-long-jump zone!!!
2692 */
2693static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2694{
2695 int rc = VERR_INTERNAL_ERROR_5;
2696#ifndef IN_NEM_DARWIN
2697 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2698#endif
2699 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2700 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2701#ifndef IN_NEM_DARWIN
2702 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2703#endif
2704
2705 /*
2706 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2707 */
2708 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2709 {
2710 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2711 {
2712 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2713#ifndef IN_NEM_DARWIN
2714 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2715 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2716#endif
2717 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2718 AssertRC(rc);
2719 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2720 }
2721
2722 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2723 {
2724 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2725#ifndef IN_NEM_DARWIN
2726 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2727 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2728#endif
2729 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2730 AssertRC(rc);
2731 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2732 }
2733
2734 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2735 {
2736 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2737#ifndef IN_NEM_DARWIN
2738 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2739 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2740#endif
2741 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2742 AssertRC(rc);
2743 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2744 }
2745
2746 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2747 {
2748 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2749#ifndef IN_NEM_DARWIN
2750 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2751 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2752#endif
2753 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2754 AssertRC(rc);
2755 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2756 }
2757
2758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2759 {
2760 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2761#ifndef IN_NEM_DARWIN
2762 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2763 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2764#endif
2765 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2766 AssertRC(rc);
2767 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2768 }
2769
2770 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2771 {
2772 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2773#ifndef IN_NEM_DARWIN
2774 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2775 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2776#endif
2777 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2778 AssertRC(rc);
2779 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2780 }
2781
2782#ifdef VBOX_STRICT
2783 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2784#endif
2785 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2786 pCtx->cs.Attr.u));
2787 }
2788
2789 /*
2790 * Guest TR.
2791 */
2792 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2793 {
2794 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2795
2796 /*
2797 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2798 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2799 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2800 */
2801 uint16_t u16Sel;
2802 uint32_t u32Limit;
2803 uint64_t u64Base;
2804 uint32_t u32AccessRights;
2805#ifndef IN_NEM_DARWIN
2806 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2807#endif
2808 {
2809 u16Sel = pCtx->tr.Sel;
2810 u32Limit = pCtx->tr.u32Limit;
2811 u64Base = pCtx->tr.u64Base;
2812 u32AccessRights = pCtx->tr.Attr.u;
2813 }
2814#ifndef IN_NEM_DARWIN
2815 else
2816 {
2817 Assert(!pVmxTransient->fIsNestedGuest);
2818 Assert(pVM->hm.s.vmx.pRealModeTSS);
2819 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2820
2821 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2822 RTGCPHYS GCPhys;
2823 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2824 AssertRCReturn(rc, rc);
2825
2826 X86DESCATTR DescAttr;
2827 DescAttr.u = 0;
2828 DescAttr.n.u1Present = 1;
2829 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2830
2831 u16Sel = 0;
2832 u32Limit = HM_VTX_TSS_SIZE;
2833 u64Base = GCPhys;
2834 u32AccessRights = DescAttr.u;
2835 }
2836#endif
2837
2838 /* Validate. */
2839 Assert(!(u16Sel & RT_BIT(2)));
2840 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2841 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2842 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2843 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2844 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2845 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2846 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2847 Assert( (u32Limit & 0xfff) == 0xfff
2848 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2849 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2850 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2851
2852 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2853 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2854 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2855 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2856
2857 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2858 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2859 }
2860
2861 /*
2862 * Guest GDTR.
2863 */
2864 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2865 {
2866 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2867
2868 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2869 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2870
2871 /* Validate. */
2872 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2873
2874 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2875 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2876 }
2877
2878 /*
2879 * Guest LDTR.
2880 */
2881 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2882 {
2883 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2884
2885 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2886 uint32_t u32Access;
2887 if ( !pVmxTransient->fIsNestedGuest
2888 && !pCtx->ldtr.Attr.u)
2889 u32Access = X86DESCATTR_UNUSABLE;
2890 else
2891 u32Access = pCtx->ldtr.Attr.u;
2892
2893 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2895 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2896 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2897
2898 /* Validate. */
2899 if (!(u32Access & X86DESCATTR_UNUSABLE))
2900 {
2901 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2902 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2903 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2904 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2905 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2906 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2907 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2908 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2909 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2910 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2911 }
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2914 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2915 }
2916
2917 /*
2918 * Guest IDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2923
2924 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2925 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2926
2927 /* Validate. */
2928 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2929
2930 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2931 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2932 }
2933
2934 return VINF_SUCCESS;
2935}
2936
2937
2938/**
2939 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2940 * VM-exit interruption info type.
2941 *
2942 * @returns The IEM exception flags.
2943 * @param uVector The event vector.
2944 * @param uVmxEventType The VMX event type.
2945 *
2946 * @remarks This function currently only constructs flags required for
2947 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2948 * and CR2 aspects of an exception are not included).
2949 */
2950static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2951{
2952 uint32_t fIemXcptFlags;
2953 switch (uVmxEventType)
2954 {
2955 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2956 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2957 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2958 break;
2959
2960 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2969 {
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2971 if (uVector == X86_XCPT_BP)
2972 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2973 else if (uVector == X86_XCPT_OF)
2974 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2975 else
2976 {
2977 fIemXcptFlags = 0;
2978 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2979 }
2980 break;
2981 }
2982
2983 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2984 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2985 break;
2986
2987 default:
2988 fIemXcptFlags = 0;
2989 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2990 break;
2991 }
2992 return fIemXcptFlags;
2993}
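
/* Illustrative sketch (hypothetical, uncompiled helper): shows the translation
   performed above for an INT3 recorded in the IDT-vectoring information.  The
   expected flag combination follows directly from the
   VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT case of the switch in vmxHCGetIemXcptFlags(). */
#if 0
DECLINLINE(void) vmxHCExampleIemXcptFlagsForInt3(void)
{
    /* A software exception with vector 3 (INT3) maps to soft-int + BP-instruction flags. */
    uint32_t const fIemXcptFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
    Assert(fIemXcptFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
    RT_NOREF(fIemXcptFlags);
}
#endif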
2994
2995
2996/**
2997 * Sets an event as a pending event to be injected into the guest.
2998 *
2999 * @param pVCpu The cross context virtual CPU structure.
3000 * @param u32IntInfo The VM-entry interruption-information field.
3001 * @param cbInstr The VM-entry instruction length in bytes (for
3002 * software interrupts, exceptions and privileged
3003 * software exceptions).
3004 * @param u32ErrCode The VM-entry exception error code.
3005 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3006 * page-fault.
3007 */
3008DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3009 RTGCUINTPTR GCPtrFaultAddress)
3010{
3011 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3012 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3013 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3014 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3015 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3016 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3017}
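
/* Illustrative sketch (hypothetical, uncompiled helper): composes a hardware \#PF
   event with an error code and fault address, mirroring the RT_BF_MAKE pattern of
   the vmxHCSetPendingXcpt* helpers below.  It is meant purely to show how the
   u32ErrCode and GCPtrFaultAddress parameters of vmxHCSetPendingEvent() are used
   together. */
#if 0
DECLINLINE(void) vmxHCExampleSetPendingXcptPF(PVMCPUCC pVCpu, uint32_t u32ErrCode, RTGCUINTPTR GCPtrFaultAddress)
{
    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, GCPtrFaultAddress);
}
#endif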
3018
3019
3020/**
3021 * Sets an external interrupt as pending-for-injection into the VM.
3022 *
3023 * @param pVCpu The cross context virtual CPU structure.
3024 * @param u8Interrupt The external interrupt vector.
3025 */
3026DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3027{
3028 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3029 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3030 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3031 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3032 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3033}
3034
3035
3036/**
3037 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3038 *
3039 * @param pVCpu The cross context virtual CPU structure.
3040 */
3041DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3042{
3043 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3044 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3045 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3046 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3047 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3048}
3049
3050
3051/**
3052 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 */
3056DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3057{
3058 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3059 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3060 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3061 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3062 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3063}
3064
3065
3066/**
3067 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3068 *
3069 * @param pVCpu The cross context virtual CPU structure.
3070 */
3071DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3072{
3073 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3074 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3075 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3076 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3077 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3078}
3079
3080
3081/**
3082 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3083 *
3084 * @param pVCpu The cross context virtual CPU structure.
3085 */
3086DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3087{
3088 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3089 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3090 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3091 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3092 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3093}
3094
3095
3096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3097/**
3098 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3099 *
3100 * @param pVCpu The cross context virtual CPU structure.
3101 * @param u32ErrCode The error code for the general-protection exception.
3102 */
3103DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3104{
3105 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3106 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3107 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3108 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3109 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3110}
3111
3112
3113/**
3114 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3115 *
3116 * @param pVCpu The cross context virtual CPU structure.
3117 * @param u32ErrCode The error code for the stack exception.
3118 */
3119DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3120{
3121 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3122 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3123 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3124 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3125 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3126}
3127#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3128
3129
3130/**
3131 * Fixes up attributes for the specified segment register.
3132 *
3133 * @param pVCpu The cross context virtual CPU structure.
3134 * @param pSelReg The segment register that needs fixing.
3135 * @param pszRegName The register name (for logging and assertions).
3136 */
3137static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3138{
3139 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3140
3141 /*
3142 * If VT-x marks the segment as unusable, most other bits remain undefined:
3143 * - For CS the L, D and G bits have meaning.
3144 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3145 * - For the remaining data segments no bits are defined.
3146 *
3147 * The present bit and the unusable bit have been observed to be set at the
3148 * same time (the selector was supposed to be invalid as we started executing
3149 * a V8086 interrupt in ring-0).
3150 *
3151 * What should be important for the rest of the VBox code is that the P bit is
3152 * cleared. Some of the other VBox code recognizes the unusable bit, but
3153 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3154 * safe side here, we'll strip off P and other bits we don't care about. If
3155 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3156 *
3157 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3158 */
3159#ifdef VBOX_STRICT
3160 uint32_t const uAttr = pSelReg->Attr.u;
3161#endif
3162
3163 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3164 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3165 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3166
3167#ifdef VBOX_STRICT
3168# ifndef IN_NEM_DARWIN
3169 VMMRZCallRing3Disable(pVCpu);
3170# endif
3171 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3172# ifdef DEBUG_bird
3173 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3174 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3175 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3176# endif
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Enable(pVCpu);
3179# endif
3180 NOREF(uAttr);
3181#endif
3182 RT_NOREF2(pVCpu, pszRegName);
3183}
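
/* Illustrative sketch (hypothetical, uncompiled helper): a concrete before/after
   for the attribute stripping above.  The 0x10093 and 0x10013 constants assume the
   standard packed x86 descriptor attribute layout (type=bits 3:0, S/DT=bit 4,
   P=bit 7, unusable=bit 16). */
#if 0
DECLINLINE(void) vmxHCExampleFixUnusableAttr(PVMCPUCC pVCpu)
{
    CPUMSELREG ExampleReg;
    RT_ZERO(ExampleReg);
    ExampleReg.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_DT | 3 /* read/write data, accessed */;
    Assert(ExampleReg.Attr.u == UINT32_C(0x10093));
    vmxHCFixUnusableSegRegAttr(pVCpu, &ExampleReg, "example");
    /* The present bit (and the other bits we don't care about) is gone; the
       unusable, type and S/DT bits remain. */
    Assert(ExampleReg.Attr.u == UINT32_C(0x10013));
}
#endif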
3184
3185
3186/**
3187 * Imports a guest segment register from the current VMCS into the guest-CPU
3188 * context.
3189 *
3190 * @param pVCpu The cross context virtual CPU structure.
3191 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3192 *
3193 * @remarks Called with interrupts and/or preemption disabled.
3194 */
3195template<uint32_t const a_iSegReg>
3196DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3197{
3198 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3199 /* Check that the macros we depend upon here and in the exporting parent function work: */
3200#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3201 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3202 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3203 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3204 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3205 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3207 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3208 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3209 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3210 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3211
3212 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3213
3214 uint16_t u16Sel;
3215 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3216 pSelReg->Sel = u16Sel;
3217 pSelReg->ValidSel = u16Sel;
3218
3219 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3220 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3221
3222 uint32_t u32Attr;
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3224 pSelReg->Attr.u = u32Attr;
3225 if (u32Attr & X86DESCATTR_UNUSABLE)
3226 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3227
3228 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3229}
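
/*
 * Note: the template above is instantiated with a compile-time segment index,
 * e.g. vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu), as done by the state importers
 * further down in this file.  The "ES\0CS\0SS\0DS\0FS\0GS" literal passed to
 * vmxHCFixUnusableSegRegAttr() is indexed at a_iSegReg * 3, so each instantiation
 * picks out its own NUL-terminated register name for logging.
 */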
3230
3231
3232/**
3233 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3234 *
3235 * @param pVCpu The cross context virtual CPU structure.
3236 *
3237 * @remarks Called with interrupts and/or preemption disabled.
3238 */
3239DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3240{
3241 uint16_t u16Sel;
3242 uint64_t u64Base;
3243 uint32_t u32Limit, u32Attr;
3244 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3245 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3246 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3247 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3248
3249 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3250 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3251 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3252 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3253 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3254 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3257}
3258
3259
3260/**
3261 * Imports the guest TR from the current VMCS into the guest-CPU context.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure.
3264 *
3265 * @remarks Called with interrupts and/or preemption disabled.
3266 */
3267DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3268{
3269 uint16_t u16Sel;
3270 uint64_t u64Base;
3271 uint32_t u32Limit, u32Attr;
3272 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3273 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3274 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3275 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3276
3277 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3278 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3279 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3280 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3281 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3282 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3283 /* TR is the only selector that can never be unusable. */
3284 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3285}
3286
3287
3288/**
3289 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3290 *
3291 * @returns The RIP value.
3292 * @param pVCpu The cross context virtual CPU structure.
3293 *
3294 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3295 * @remarks Do -not- call this function directly!
3296 */
3297DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3298{
3299 uint64_t u64Val;
3300 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3301 AssertRC(rc);
3302
3303 pVCpu->cpum.GstCtx.rip = u64Val;
3304
3305 return u64Val;
3306}
3307
3308
3309/**
3310 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3311 *
3312 * @param pVCpu The cross context virtual CPU structure.
3313 *
3314 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3315 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3316 * instead!!!
3317 */
3318DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3319{
3320 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3321 {
3322 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3323 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3324 }
3325}
3326
3327
3328/**
3329 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3330 *
3331 * @param pVCpu The cross context virtual CPU structure.
3332 * @param pVmcsInfo The VMCS info. object.
3333 *
3334 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3335 * @remarks Do -not- call this function directly!
3336 */
3337DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3338{
3339 uint64_t fRFlags;
3340 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3341 AssertRC(rc);
3342
3343 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3344 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3345
3346 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3347#ifndef IN_NEM_DARWIN
3348 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3349 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3350 { /* most likely */ }
3351 else
3352 {
3353 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3354 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3355 }
3356#else
3357 RT_NOREF(pVmcsInfo);
3358#endif
3359}
3360
3361
3362/**
3363 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3364 *
3365 * @param pVCpu The cross context virtual CPU structure.
3366 * @param pVmcsInfo The VMCS info. object.
3367 *
3368 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3369 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3370 * instead!!!
3371 */
3372DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3373{
3374 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3375 {
3376 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3377 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3378 }
3379}
3380
3381
3382/**
3383 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3384 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3385 */
3386DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3387{
3388 /*
3389 * We must import RIP here to set our EM interrupt-inhibited state.
3390 * We also import RFLAGS as our code that evaluates pending interrupts
3391 * before VM-entry requires it.
3392 */
3393 vmxHCImportGuestRip(pVCpu);
3394 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3395
3396 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3397 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3398 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3399 pVCpu->cpum.GstCtx.rip);
3400 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3401}
3402
3403
3404/**
3405 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3406 * context.
3407 *
3408 * @note May import RIP and RFLAGS if interrupts or NMIs are blocked.
3409 *
3410 * @param pVCpu The cross context virtual CPU structure.
3411 * @param pVmcsInfo The VMCS info. object.
3412 *
3413 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3414 * do not log!
3415 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3416 * instead!!!
3417 */
3418DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3419{
3420 uint32_t u32Val;
3421 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3422 if (!u32Val)
3423 {
3424 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3425 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3426 }
3427 else
3428 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3429}
3430
3431
3432/**
3433 * Worker for VMXR0ImportStateOnDemand.
3434 *
3435 * @returns VBox status code.
3436 * @param pVCpu The cross context virtual CPU structure.
3437 * @param pVmcsInfo The VMCS info. object.
3438 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3439 */
3440static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3441{
3442 int rc = VINF_SUCCESS;
3443 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3444 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3445 uint32_t u32Val;
3446
3447 /*
3448 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3449 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3450 * neither are other host platforms.
3451 *
3452 * Committing this temporarily as it prevents the BSOD.
3453 *
3454 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3455 */
3456#ifdef RT_OS_WINDOWS
3457 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3458 return VERR_HM_IPE_1;
3459#endif
3460
3461 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3462
3463#ifndef IN_NEM_DARWIN
3464 /*
3465 * We disable interrupts to make the updating of the state and in particular
3466 * the fExtrn modification atomic with respect to preemption hooks.
3467 */
3468 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3469#endif
3470
3471 fWhat &= pCtx->fExtrn;
3472 if (fWhat)
3473 {
3474 do
3475 {
3476 if (fWhat & CPUMCTX_EXTRN_RIP)
3477 vmxHCImportGuestRip(pVCpu);
3478
3479 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3480 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3481
3482 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3483 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3484 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3485
3486 if (fWhat & CPUMCTX_EXTRN_RSP)
3487 {
3488 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3489 AssertRC(rc);
3490 }
3491
3492 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3493 {
3494 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3495#ifndef IN_NEM_DARWIN
3496 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3497#else
3498 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3499#endif
3500 if (fWhat & CPUMCTX_EXTRN_CS)
3501 {
3502 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3503 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3504 if (fRealOnV86Active)
3505 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3506 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_SS)
3509 {
3510 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3511 if (fRealOnV86Active)
3512 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_DS)
3515 {
3516 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3517 if (fRealOnV86Active)
3518 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_ES)
3521 {
3522 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3523 if (fRealOnV86Active)
3524 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3525 }
3526 if (fWhat & CPUMCTX_EXTRN_FS)
3527 {
3528 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3529 if (fRealOnV86Active)
3530 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3531 }
3532 if (fWhat & CPUMCTX_EXTRN_GS)
3533 {
3534 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3535 if (fRealOnV86Active)
3536 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3537 }
3538 }
3539
3540 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3541 {
3542 if (fWhat & CPUMCTX_EXTRN_LDTR)
3543 vmxHCImportGuestLdtr(pVCpu);
3544
3545 if (fWhat & CPUMCTX_EXTRN_GDTR)
3546 {
3547 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3548 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3549 pCtx->gdtr.cbGdt = u32Val;
3550 }
3551
3552 /* Guest IDTR. */
3553 if (fWhat & CPUMCTX_EXTRN_IDTR)
3554 {
3555 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3556 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3557 pCtx->idtr.cbIdt = u32Val;
3558 }
3559
3560 /* Guest TR. */
3561 if (fWhat & CPUMCTX_EXTRN_TR)
3562 {
3563#ifndef IN_NEM_DARWIN
3564 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3565 don't need to import that one. */
3566 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3567#endif
3568 vmxHCImportGuestTr(pVCpu);
3569 }
3570 }
3571
3572 if (fWhat & CPUMCTX_EXTRN_DR7)
3573 {
3574#ifndef IN_NEM_DARWIN
3575 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3576#endif
3577 {
3578 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3579 AssertRC(rc);
3580 }
3581 }
3582
3583 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3584 {
3585 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3586 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3587 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3588 pCtx->SysEnter.cs = u32Val;
3589 }
3590
3591#ifndef IN_NEM_DARWIN
3592 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3593 {
3594 if ( pVM->hmr0.s.fAllow64BitGuests
3595 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3596 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3597 }
3598
3599 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3600 {
3601 if ( pVM->hmr0.s.fAllow64BitGuests
3602 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3603 {
3604 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3605 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3606 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3607 }
3608 }
3609
3610 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3611 {
3612 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3613 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3614 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3615 Assert(pMsrs);
3616 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3617 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3618 for (uint32_t i = 0; i < cMsrs; i++)
3619 {
3620 uint32_t const idMsr = pMsrs[i].u32Msr;
3621 switch (idMsr)
3622 {
3623 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3624 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3625 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3626 default:
3627 {
3628 uint32_t idxLbrMsr;
3629 if (VM_IS_VMX_LBR(pVM))
3630 {
3631 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3632 {
3633 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3634 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3635 break;
3636 }
3637 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3638 {
3639 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3640 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3641 break;
3642 }
3643 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3644 {
3645 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3646 break;
3647 }
3648 /* Fallthru (no break) */
3649 }
3650 pCtx->fExtrn = 0;
3651 VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3652 ASMSetFlags(fEFlags);
3653 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3654 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3655 }
3656 }
3657 }
3658 }
3659#endif
3660
3661 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3662 {
3663 if (fWhat & CPUMCTX_EXTRN_CR0)
3664 {
3665 uint64_t u64Cr0;
3666 uint64_t u64Shadow;
3667 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3668 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3669#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3670 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3671 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3672#else
3673 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3674 {
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3677 }
3678 else
3679 {
3680 /*
3681 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3682 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3683 * re-construct CR0. See @bugref{9180#c95} for details.
3684 */
3685 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3686 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3687 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3688 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3689 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3690 }
3691#endif
3692#ifndef IN_NEM_DARWIN
3693 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3694#endif
3695 CPUMSetGuestCR0(pVCpu, u64Cr0);
3696#ifndef IN_NEM_DARWIN
3697 VMMRZCallRing3Enable(pVCpu);
3698#endif
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_CR4)
3702 {
3703 uint64_t u64Cr4;
3704 uint64_t u64Shadow;
3705 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3706 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3707#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3708 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3709 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3710#else
3711 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3712 {
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3715 }
3716 else
3717 {
3718 /*
3719 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3720 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3721 * re-construct CR4. See @bugref{9180#c95} for details.
3722 */
3723 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3724 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3725 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3726 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3727 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3728 }
3729#endif
3730 pCtx->cr4 = u64Cr4;
3731 }
3732
3733 if (fWhat & CPUMCTX_EXTRN_CR3)
3734 {
3735 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3736 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3737 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3738 && CPUMIsGuestPagingEnabledEx(pCtx)))
3739 {
3740 uint64_t u64Cr3;
3741 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3742 if (pCtx->cr3 != u64Cr3)
3743 {
3744 pCtx->cr3 = u64Cr3;
3745 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3746 }
3747
3748 /*
3749 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3750 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3751 */
3752 if (CPUMIsGuestInPAEModeEx(pCtx))
3753 {
3754 X86PDPE aPaePdpes[4];
3755 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3756 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3758 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3759 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3760 {
3761 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3762 /* PGM now updates PAE PDPTEs while updating CR3. */
3763 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3764 }
3765 }
3766 }
3767 }
3768 }
3769
3770#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3771 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3772 {
3773 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3774 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3775 {
3776 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3777 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3778 if (RT_SUCCESS(rc))
3779 { /* likely */ }
3780 else
3781 break;
3782 }
3783 }
3784#endif
3785 } while (0);
3786
3787 if (RT_SUCCESS(rc))
3788 {
3789 /* Update fExtrn. */
3790 pCtx->fExtrn &= ~fWhat;
3791
3792 /* If everything has been imported, clear the HM keeper bit. */
3793 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3794 {
3795#ifndef IN_NEM_DARWIN
3796 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3797#else
3798 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3799#endif
3800 Assert(!pCtx->fExtrn);
3801 }
3802 }
3803 }
3804#ifndef IN_NEM_DARWIN
3805 else
3806 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3807
3808 /*
3809 * Restore interrupts.
3810 */
3811 ASMSetFlags(fEFlags);
3812#endif
3813
3814 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3815
3816 if (RT_SUCCESS(rc))
3817 { /* likely */ }
3818 else
3819 return rc;
3820
3821 /*
3822 * Honor any pending CR3 updates.
3823 *
3824 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3825 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3826 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3827 *
3828 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3829 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3830 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3831 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3832 *
3833 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3834 *
3835 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3836 */
3837 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3838#ifndef IN_NEM_DARWIN
3839 && VMMRZCallRing3IsEnabled(pVCpu)
3840#endif
3841 )
3842 {
3843 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3844 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3845 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3846 }
3847
3848 return VINF_SUCCESS;
3849}
3850
3851
3852/**
3853 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3854 *
3855 * @returns VBox status code.
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param pVmcsInfo The VMCS info. object.
3858 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3859 * in NEM/darwin context.
3860 * @tparam a_fWhat What to import, zero or more bits from
3861 * HMVMX_CPUMCTX_EXTRN_ALL.
3862 */
3863template<uint64_t const a_fWhat>
3864static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3865{
3866 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3867 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3868 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3869 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3870
3871 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3872
3873 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3874
3875 /* RIP and RFLAGS may have been imported already by the post-exit code
3876 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3877 they are skipped here. */
3878 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3879 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3880 {
3881 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3882 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3883
3884 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3885 {
3886 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3887 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3888 else
3889 vmxHCImportGuestCoreRip(pVCpu);
3890 }
3891 }
3892
3893 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3894 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3895 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3896
3897 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3898 {
3899 if (a_fWhat & CPUMCTX_EXTRN_CS)
3900 {
3901 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3902 /** @todo try get rid of this carp, it smells and is probably never ever
3903 * used: */
3904 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3905 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3906 {
3907 vmxHCImportGuestCoreRip(pVCpu);
3908 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3909 }
3910 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3911 }
3912 if (a_fWhat & CPUMCTX_EXTRN_SS)
3913 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3914 if (a_fWhat & CPUMCTX_EXTRN_DS)
3915 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3916 if (a_fWhat & CPUMCTX_EXTRN_ES)
3917 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3918 if (a_fWhat & CPUMCTX_EXTRN_FS)
3919 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3920 if (a_fWhat & CPUMCTX_EXTRN_GS)
3921 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3922
3923 /* Guest TR.
3924 Real-mode emulation using virtual-8086 mode has the fake TSS
3925 (pRealModeTSS) in TR, don't need to import that one. */
3926#ifndef IN_NEM_DARWIN
3927 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3928 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3929 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3930#else
3931 if (a_fWhat & CPUMCTX_EXTRN_TR)
3932#endif
3933 vmxHCImportGuestTr(pVCpu);
3934
3935#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3936 if (fRealOnV86Active)
3937 {
3938 if (a_fWhat & CPUMCTX_EXTRN_CS)
3939 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3940 if (a_fWhat & CPUMCTX_EXTRN_SS)
3941 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3942 if (a_fWhat & CPUMCTX_EXTRN_DS)
3943 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3944 if (a_fWhat & CPUMCTX_EXTRN_ES)
3945 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3946 if (a_fWhat & CPUMCTX_EXTRN_FS)
3947 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3948 if (a_fWhat & CPUMCTX_EXTRN_GS)
3949 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3950 }
3951#endif
3952 }
3953
3954 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3955 {
3956 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3957 AssertRC(rc);
3958 }
3959
3960 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3961 vmxHCImportGuestLdtr(pVCpu);
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3964 {
3965 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3966 uint32_t u32Val;
3967 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3968 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3969 }
3970
3971 /* Guest IDTR. */
3972 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3973 {
3974 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3975 uint32_t u32Val;
3976 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3977 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
3978 }
3979
3980 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3981 {
3982#ifndef IN_NEM_DARWIN
3983 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3984#endif
3985 {
3986 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3987 AssertRC(rc);
3988 }
3989 }
3990
3991 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3992 {
3993 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3994 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3995 uint32_t u32Val;
3996 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3997 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3998 }
3999
4000#ifndef IN_NEM_DARWIN
4001 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4002 {
4003 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4004 && pVM->hmr0.s.fAllow64BitGuests)
4005 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4006 }
4007
4008 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4009 {
4010 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4011 && pVM->hmr0.s.fAllow64BitGuests)
4012 {
4013 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4014 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4015 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4016 }
4017 }
4018
4019 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4020 {
4021 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4022 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4023 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4024 Assert(pMsrs);
4025 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4026 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4027 for (uint32_t i = 0; i < cMsrs; i++)
4028 {
4029 uint32_t const idMsr = pMsrs[i].u32Msr;
4030 switch (idMsr)
4031 {
4032 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4033 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4034 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4035 default:
4036 {
4037 uint32_t idxLbrMsr;
4038 if (VM_IS_VMX_LBR(pVM))
4039 {
4040 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4041 {
4042 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4043 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4044 break;
4045 }
4046 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4047 {
4048 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4049 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4050 break;
4051 }
4052 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4053 {
4054 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4055 break;
4056 }
4057 }
4058 pVCpu->cpum.GstCtx.fExtrn = 0;
4059 VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
4060 ASMSetFlags(fEFlags);
4061 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4062 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4063 }
4064 }
4065 }
4066 }
4067#endif
4068
4069 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4070 {
4071 uint64_t u64Cr0;
4072 uint64_t u64Shadow;
4073 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4074 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4075#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4076 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4077 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4078#else
4079 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4080 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4081 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4082 else
4083 {
4084 /*
4085 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4086 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4087 * re-construct CR0. See @bugref{9180#c95} for details.
4088 */
4089 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4090 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4091 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4092 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4093 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4094 }
4095#endif
4096#ifndef IN_NEM_DARWIN
4097 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4098#endif
4099 CPUMSetGuestCR0(pVCpu, u64Cr0);
4100#ifndef IN_NEM_DARWIN
4101 VMMRZCallRing3Enable(pVCpu);
4102#endif
4103 }
4104
4105 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4106 {
4107 uint64_t u64Cr4;
4108 uint64_t u64Shadow;
4109 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4110 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4111#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4112 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4113 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4114#else
4115 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4116 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4117 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4118 else
4119 {
4120 /*
4121 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4122 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4123 * re-construct CR4. See @bugref{9180#c95} for details.
4124 */
4125 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4126 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4127 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4128 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4129 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4130 }
4131#endif
4132 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4133 }
4134
4135 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4136 {
4137 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4138 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4139 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4140 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4141 {
4142 uint64_t u64Cr3;
4143 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4144 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4145 {
4146 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4147 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4148 }
4149
4150 /*
4151 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4152 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4153 */
4154 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4155 {
4156 X86PDPE aPaePdpes[4];
4157 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4158 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4159 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4160 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4161 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4162 {
4163 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4164 /* PGM now updates PAE PDPTEs while updating CR3. */
4165 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4166 }
4167 }
4168 }
4169 }
4170
4171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4172 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4173 {
4174 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4175 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4176 {
4177 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4178 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4179 AssertRCReturn(rc, rc);
4180 }
4181 }
4182#endif
4183
4184 /* Update fExtrn. */
4185 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4186
4187 /* If everything has been imported, clear the HM keeper bit. */
4188 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4189 {
4190#ifndef IN_NEM_DARWIN
4191 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4192#else
4193 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4194#endif
4195 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4196 }
4197
4198 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4199
4200 /*
4201 * Honor any pending CR3 updates.
4202 *
4203 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4204 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4205 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4206 *
4207 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4208 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4209 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4210 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4211 *
4212 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4213 *
4214 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4215 */
4216#ifndef IN_NEM_DARWIN
4217 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4218 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4219 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4220 return VINF_SUCCESS;
4221 ASMSetFlags(fEFlags);
4222#else
4223 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4224 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4225 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4226 return VINF_SUCCESS;
4227 RT_NOREF_PV(fEFlags);
4228#endif
4229
4230 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4231 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4233 return VINF_SUCCESS;
4234}
4235
4236
4237/**
4238 * Internal state fetcher.
4239 *
4240 * @returns VBox status code.
4241 * @param pVCpu The cross context virtual CPU structure.
4242 * @param pVmcsInfo The VMCS info. object.
4243 * @param pszCaller For logging.
4244 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4245 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4246 * already. This is ORed together with @a a_fWhat when
4247 * calculating what needs fetching (just for safety).
4248 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4249 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4250 * already. This is ORed together with @a a_fWhat when
4251 * calculating what needs fetching (just for safety).
4252 */
4253template<uint64_t const a_fWhat,
4254 uint64_t const a_fDoneLocal = 0,
4255 uint64_t const a_fDonePostExit = 0
4256#ifndef IN_NEM_DARWIN
4257 | CPUMCTX_EXTRN_INHIBIT_INT
4258 | CPUMCTX_EXTRN_INHIBIT_NMI
4259# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4260 | HMVMX_CPUMCTX_EXTRN_ALL
4261# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4262 | CPUMCTX_EXTRN_RFLAGS
4263# endif
4264#else /* IN_NEM_DARWIN */
4265 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4266#endif /* IN_NEM_DARWIN */
4267>
4268DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4269{
4270 RT_NOREF_PV(pszCaller);
4271 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4272 {
4273#ifndef IN_NEM_DARWIN
4274 /*
4275 * We disable interrupts to make the updating of the state and in particular
4276 * the fExtrn modification atomic with respect to preemption hooks.
4277 */
4278 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4279#else
4280 RTCCUINTREG const fEFlags = 0;
4281#endif
4282
4283 /*
4284 * We combine all three parameters and take the (probably) inlined optimized
4285 * code path for the new things specified in a_fWhat.
4286 *
4287 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4288 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4289 * also take the streamlined path when both of these are cleared in fExtrn
4290 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4291 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4292 */
4293 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4294 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4295 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4296 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4297 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4298 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4299 {
4300 int const rc = vmxHCImportGuestStateInner< a_fWhat
4301 & HMVMX_CPUMCTX_EXTRN_ALL
4302 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4303#ifndef IN_NEM_DARWIN
4304 ASMSetFlags(fEFlags);
4305#endif
4306 return rc;
4307 }
4308
4309#ifndef IN_NEM_DARWIN
4310 ASMSetFlags(fEFlags);
4311#endif
4312
4313 /*
4314 * We shouldn't normally get here, but it may happen when executing
4315 * in the debug run-loops. Typically, everything should already have
4316 * been fetched then. Otherwise call the fallback state import function.
4317 */
4318 if (fWhatToDo == 0)
4319 { /* hope the cause was the debug loop or something similar */ }
4320 else
4321 {
4322 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4323 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4324 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4325 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4326 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4327 }
4328 }
4329 return VINF_SUCCESS;
4330}
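
/* Illustrative sketch (hypothetical, uncompiled helper): a typical call site for
   the template above.  Exit handlers invoke it with a compile-time mask of the
   state they need plus the caller name used by the fallback-path logging; the
   a_fDoneLocal and a_fDonePostExit parameters are normally left at their defaults. */
#if 0
static int vmxHCExampleImportRipRflags(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    int const rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif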
4331
4332
4333/**
4334 * Check per-VM and per-VCPU force flag actions that require us to go back to
4335 * ring-3 for one reason or another.
4336 *
4337 * @returns Strict VBox status code (i.e. informational status codes too)
4338 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4339 * ring-3.
4340 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4341 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4342 * interrupts)
4343 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4344 * all EMTs to be in ring-3.
4345 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4346 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4347 * to the EM loop.
4348 *
4349 * @param pVCpu The cross context virtual CPU structure.
4350 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4351 * @param fStepping Whether we are single-stepping the guest using the
4352 * hypervisor debugger.
4353 *
4354 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4355 * is no longer in VMX non-root mode.
4356 */
4357static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4358{
4359#ifndef IN_NEM_DARWIN
4360 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4361#endif
4362
4363 /*
4364 * Update pending interrupts into the APIC's IRR.
4365 */
4366 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4367 APICUpdatePendingInterrupts(pVCpu);
4368
4369 /*
4370 * Anything pending? Should be more likely than not if we're doing a good job.
4371 */
4372 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4373 if ( !fStepping
4374 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4375 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4376 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4377 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4378 return VINF_SUCCESS;
4379
4380 /* Pending PGM CR3 sync. */
4381 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4382 {
4383 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4384 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4385 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4386 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4387 if (rcStrict != VINF_SUCCESS)
4388 {
4389 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4390 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4391 return rcStrict;
4392 }
4393 }
4394
4395 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4396 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4397 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4398 {
4399 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4400 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4401 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4402 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4403 return rc;
4404 }
4405
4406 /* Pending VM request packets, such as hardware interrupts. */
4407 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4408 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4409 {
4410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4411 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4412 return VINF_EM_PENDING_REQUEST;
4413 }
4414
4415 /* Pending PGM pool flushes. */
4416 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4417 {
4418 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4419 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4420 return VINF_PGM_POOL_FLUSH_PENDING;
4421 }
4422
4423 /* Pending DMA requests. */
4424 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4425 {
4426 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4427 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4428 return VINF_EM_RAW_TO_R3;
4429 }
4430
4431#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4432 /*
4433 * Pending nested-guest events.
4434 *
4435     * Please note that the priority of these events is specified and important.
4436 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4437 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4438 */
4439 if (fIsNestedGuest)
4440 {
4441 /* Pending nested-guest APIC-write. */
4442 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4443 {
4444 Log4Func(("Pending nested-guest APIC-write\n"));
4445 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4446 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4447 return rcStrict;
4448 }
4449
4450 /* Pending nested-guest monitor-trap flag (MTF). */
4451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4452 {
4453 Log4Func(("Pending nested-guest MTF\n"));
4454 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4455 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4456 return rcStrict;
4457 }
4458
4459 /* Pending nested-guest VMX-preemption timer expired. */
4460 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4461 {
4462 Log4Func(("Pending nested-guest preempt timer\n"));
4463 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4464 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4465 return rcStrict;
4466 }
4467 }
4468#else
4469 NOREF(fIsNestedGuest);
4470#endif
4471
4472 return VINF_SUCCESS;
4473}
4474
4475
4476/**
4477 * Converts any TRPM trap into a pending HM event. This is typically used when
4478 * entering from ring-3 (not longjmp returns).
4479 *
4480 * @param pVCpu The cross context virtual CPU structure.
4481 */
4482static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4483{
4484 Assert(TRPMHasTrap(pVCpu));
4485 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4486
4487 uint8_t uVector;
4488 TRPMEVENT enmTrpmEvent;
4489 uint32_t uErrCode;
4490 RTGCUINTPTR GCPtrFaultAddress;
4491 uint8_t cbInstr;
4492 bool fIcebp;
4493
4494 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4495 AssertRC(rc);
4496
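    /* Note: the IDT-vectoring information layout used below matches the VM-entry
       interruption-information layout (vector in bits 7:0, type in bits 10:8, valid in
       bit 31), so the value can be handed to vmxHCSetPendingEvent() as-is. */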
4497 uint32_t u32IntInfo;
4498 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4499 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4500
4501 rc = TRPMResetTrap(pVCpu);
4502 AssertRC(rc);
4503 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4504 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4505
4506 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4507}
4508
4509
4510/**
4511 * Converts the pending HM event into a TRPM trap.
4512 *
4513 * @param pVCpu The cross context virtual CPU structure.
4514 */
4515static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4516{
4517 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4518
4519 /* If a trap was already pending, we did something wrong! */
4520 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4521
4522 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4523 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4524 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4525
4526 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4527
4528 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4529 AssertRC(rc);
4530
4531 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4532 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4533
4534 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4535 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4536 else
4537 {
4538 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
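        /* For software interrupts and software/privileged-software exceptions the instruction
           length must be forwarded too, so the event can be re-injected correctly later. */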
4539 switch (uVectorType)
4540 {
4541 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4542 TRPMSetTrapDueToIcebp(pVCpu);
4543 RT_FALL_THRU();
4544 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4545 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4546 {
4547 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4548 || ( uVector == X86_XCPT_BP /* INT3 */
4549 || uVector == X86_XCPT_OF /* INTO */
4550 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4551 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4552 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4553 break;
4554 }
4555 }
4556 }
4557
4558 /* We're now done converting the pending event. */
4559 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4560}
4561
4562
4563/**
4564 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4565 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4566 *
4567 * @param pVCpu The cross context virtual CPU structure.
4568 * @param pVmcsInfo The VMCS info. object.
4569 */
4570static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4571{
4572 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4573 {
4574 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4575 {
4576 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4577 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4578 AssertRC(rc);
4579 }
4580    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4581}
4582
4583
4584/**
4585 * Clears the interrupt-window exiting control in the VMCS.
4586 *
4587 * @param pVCpu The cross context virtual CPU structure.
4588 * @param pVmcsInfo The VMCS info. object.
4589 */
4590DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4591{
4592 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4593 {
4594 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4595 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4596 AssertRC(rc);
4597 }
4598}
4599
4600
4601/**
4602 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4603 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4604 *
4605 * @param pVCpu The cross context virtual CPU structure.
4606 * @param pVmcsInfo The VMCS info. object.
4607 */
4608static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4609{
4610 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4611 {
4612 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4613 {
4614 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4615 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4616 AssertRC(rc);
4617 Log4Func(("Setup NMI-window exiting\n"));
4618 }
4619 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4620}
4621
4622
4623/**
4624 * Clears the NMI-window exiting control in the VMCS.
4625 *
4626 * @param pVCpu The cross context virtual CPU structure.
4627 * @param pVmcsInfo The VMCS info. object.
4628 */
4629DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4630{
4631 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4632 {
4633 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4635 AssertRC(rc);
4636 }
4637}
4638
4639
4640/**
4641 * Injects an event into the guest upon VM-entry by updating the relevant fields
4642 * in the VM-entry area in the VMCS.
4643 *
4644 * @returns Strict VBox status code (i.e. informational status codes too).
4645 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4646 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4647 *
4648 * @param pVCpu The cross context virtual CPU structure.
4649 * @param pVmcsInfo The VMCS info object.
4650 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4651 * @param pEvent The event being injected.
4652 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4653 *                          will be updated if necessary. This cannot be NULL.
4654 * @param fStepping Whether we're single-stepping guest execution and should
4655 * return VINF_EM_DBG_STEPPED if the event is injected
4656 * directly (registers modified by us, not by hardware on
4657 * VM-entry).
4658 */
4659static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4660 bool fStepping, uint32_t *pfIntrState)
4661{
4662 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4663 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4664 Assert(pfIntrState);
4665
4666#ifdef IN_NEM_DARWIN
4667 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4668#endif
4669
4670 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4671 uint32_t u32IntInfo = pEvent->u64IntInfo;
4672 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4673 uint32_t const cbInstr = pEvent->cbInstr;
4674 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4675 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4676 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4677
4678#ifdef VBOX_STRICT
4679 /*
4680 * Validate the error-code-valid bit for hardware exceptions.
4681 * No error codes for exceptions in real-mode.
4682 *
4683 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4684 */
4685 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4686 && !CPUMIsGuestInRealModeEx(pCtx))
4687 {
4688 switch (uVector)
4689 {
4690 case X86_XCPT_PF:
4691 case X86_XCPT_DF:
4692 case X86_XCPT_TS:
4693 case X86_XCPT_NP:
4694 case X86_XCPT_SS:
4695 case X86_XCPT_GP:
4696 case X86_XCPT_AC:
4697 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4698 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4699 RT_FALL_THRU();
4700 default:
4701 break;
4702 }
4703 }
4704
4705 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4706 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4707 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4708#endif
4709
4710 RT_NOREF(uVector);
4711 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4712 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4713 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4714 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4715 {
4716 Assert(uVector <= X86_XCPT_LAST);
4717 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4718 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4719 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4720 }
4721 else
4722 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4723
4724 /*
4725 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4726 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4727 * interrupt handler in the (real-mode) guest.
4728 *
4729 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4730 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4731 */
4732 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4733 {
4734#ifndef IN_NEM_DARWIN
4735 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4736#endif
4737 {
4738 /*
4739 * For CPUs with unrestricted guest execution enabled and with the guest
4740 * in real-mode, we must not set the deliver-error-code bit.
4741 *
4742 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4743 */
4744 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4745 }
4746#ifndef IN_NEM_DARWIN
4747 else
4748 {
4749 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4750 Assert(PDMVmmDevHeapIsEnabled(pVM));
4751 Assert(pVM->hm.s.vmx.pRealModeTSS);
4752 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4753
4754 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4755 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4756 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4757 AssertRCReturn(rc2, rc2);
4758
4759 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4760 size_t const cbIdtEntry = sizeof(X86IDTR16);
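            /* Each IVT entry is 4 bytes (16-bit offset + 16-bit selector); e.g. vector 0x0d (#GP)
               occupies bytes 0x34..0x37, so idtr.cbIdt must be at least 0x37 for it to be present. */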
4761 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4762 {
4763 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4764 if (uVector == X86_XCPT_DF)
4765 return VINF_EM_RESET;
4766
4767 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4768 No error codes for exceptions in real-mode. */
4769 if (uVector == X86_XCPT_GP)
4770 {
4771 static HMEVENT const s_EventXcptDf
4772 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4773 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4774 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4775 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4776 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4777 }
4778
4779 /*
4780 * If we're injecting an event with no valid IDT entry, inject a #GP.
4781 * No error codes for exceptions in real-mode.
4782 *
4783 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4784 */
4785 static HMEVENT const s_EventXcptGp
4786 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4787 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4788 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4789 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4790 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4791 }
4792
4793 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4794 uint16_t uGuestIp = pCtx->ip;
4795 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4796 {
4797 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4798 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4799 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4800 }
4801 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4802 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4803
4804 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4805 X86IDTR16 IdtEntry;
4806 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4807 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4808 AssertRCReturn(rc2, rc2);
4809
4810 /* Construct the stack frame for the interrupt/exception handler. */
4811 VBOXSTRICTRC rcStrict;
4812 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4813 if (rcStrict == VINF_SUCCESS)
4814 {
4815 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4816 if (rcStrict == VINF_SUCCESS)
4817 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4818 }
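            /* The pushes above build the real-mode interrupt frame (FLAGS, CS, IP) that the handler's
               IRET will consume; for INT n, INT3 and INTO the saved IP points past the instruction. */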
4819
4820 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4821 if (rcStrict == VINF_SUCCESS)
4822 {
4823 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4824 pCtx->rip = IdtEntry.offSel;
4825 pCtx->cs.Sel = IdtEntry.uSel;
4826 pCtx->cs.ValidSel = IdtEntry.uSel;
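                /* Real-mode segment base = selector * 16; cbIdtEntry is 4, so the shift below is equivalent to '<< 4'. */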
4827 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4828 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4829 && uVector == X86_XCPT_PF)
4830 pCtx->cr2 = GCPtrFault;
4831
4832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4833 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4834 | HM_CHANGED_GUEST_RSP);
4835
4836 /*
4837 * If we delivered a hardware exception (other than an NMI) and if there was
4838 * block-by-STI in effect, we should clear it.
4839 */
4840 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4841 {
4842 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4843 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4844 Log4Func(("Clearing inhibition due to STI\n"));
4845 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4846 }
4847
4848 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4849 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4850
4851 /*
4852 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4853 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4854 */
4855 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4856
4857 /*
4858 * If we eventually support nested-guest execution without unrestricted guest execution,
4859 * we should set fInterceptEvents here.
4860 */
4861 Assert(!fIsNestedGuest);
4862
4863 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4864 if (fStepping)
4865 rcStrict = VINF_EM_DBG_STEPPED;
4866 }
4867 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4868 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4869 return rcStrict;
4870 }
4871#else
4872 RT_NOREF(pVmcsInfo);
4873#endif
4874 }
4875
4876 /*
4877 * Validate.
4878 */
4879 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4880 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4881
4882 /*
4883 * Inject the event into the VMCS.
4884 */
4885 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4886 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4887 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4888 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4889 AssertRC(rc);
4890
4891 /*
4892 * Update guest CR2 if this is a page-fault.
4893 */
4894 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4895 pCtx->cr2 = GCPtrFault;
4896
4897 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4898 return VINF_SUCCESS;
4899}
4900
4901
4902/**
4903 * Evaluates the event to be delivered to the guest and sets it as the pending
4904 * event.
4905 *
4906 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4907 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4908 * NOT restore these force-flags.
4909 *
4910 * @returns Strict VBox status code (i.e. informational status codes too).
4911 * @param pVCpu The cross context virtual CPU structure.
4912 * @param pVmcsInfo The VMCS information structure.
4913 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4914 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4915 */
4916static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4917{
4918 Assert(pfIntrState);
4919 Assert(!TRPMHasTrap(pVCpu));
4920
4921 /*
4922 * Compute/update guest-interruptibility state related FFs.
4923 * The FFs will be used below while evaluating events to be injected.
4924 */
4925 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4926
4927 /*
4928 * Evaluate if a new event needs to be injected.
4929     * For an event that's already pending, all the necessary checks have been done.
4930 */
4931 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4932 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4933 {
4934 /** @todo SMI. SMIs take priority over NMIs. */
4935
4936 /*
4937 * NMIs.
4938 * NMIs take priority over external interrupts.
4939 */
4940#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4941 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4942#endif
4943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4944 {
4945 /*
4946 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4947 *
4948 * For a nested-guest, the FF always indicates the outer guest's ability to
4949 * receive an NMI while the guest-interruptibility state bit depends on whether
4950 * the nested-hypervisor is using virtual-NMIs.
4951 */
4952 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4953 {
4954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4955 if ( fIsNestedGuest
4956 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4957 return IEMExecVmxVmexitXcptNmi(pVCpu);
4958#endif
4959 vmxHCSetPendingXcptNmi(pVCpu);
4960 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4961 Log4Func(("NMI pending injection\n"));
4962
4963 /* We've injected the NMI, bail. */
4964 return VINF_SUCCESS;
4965 }
4966 if (!fIsNestedGuest)
4967 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4968 }
4969
4970 /*
4971 * External interrupts (PIC/APIC).
4972 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4973         * We cannot re-request the interrupt from the controller.
4974 */
4975 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4976 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4977 {
4978 Assert(!DBGFIsStepping(pVCpu));
4979 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4980 AssertRC(rc);
4981
4982 /*
4983             * We must not check EFLAGS directly when executing a nested-guest; use
4984 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4985 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4986 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4987 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4988 *
4989 * See Intel spec. 25.4.1 "Event Blocking".
4990 */
4991 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4992 {
4993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4994 if ( fIsNestedGuest
4995 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4996 {
4997 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4998 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4999 return rcStrict;
5000 }
5001#endif
5002 uint8_t u8Interrupt;
5003 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5004 if (RT_SUCCESS(rc))
5005 {
5006#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5007 if ( fIsNestedGuest
5008 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5009 {
5010 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5011 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5012 return rcStrict;
5013 }
5014#endif
5015 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5016 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5017 }
5018 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5019 {
5020 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5021
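                    /* The TPR threshold is set to the pending interrupt's priority class (upper nibble
                       of the vector) so that a TPR-below-threshold VM-exit occurs once the guest lowers
                       its TPR far enough for this interrupt to become deliverable. */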
5022 if ( !fIsNestedGuest
5023 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5024 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5025 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
5026
5027 /*
5028 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5029 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5030 * need to re-set this force-flag here.
5031 */
5032 }
5033 else
5034 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5035
5036 /* We've injected the interrupt or taken necessary action, bail. */
5037 return VINF_SUCCESS;
5038 }
5039 if (!fIsNestedGuest)
5040 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5041 }
5042 }
5043 else if (!fIsNestedGuest)
5044 {
5045 /*
5046 * An event is being injected or we are in an interrupt shadow. Check if another event is
5047 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5048 * the pending event.
5049 */
5050 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5051 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5052 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5053 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5054 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5055 }
5056 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5057
5058 return VINF_SUCCESS;
5059}
5060
5061
5062/**
5063 * Injects any pending events into the guest if the guest is in a state to
5064 * receive them.
5065 *
5066 * @returns Strict VBox status code (i.e. informational status codes too).
5067 * @param pVCpu The cross context virtual CPU structure.
5068 * @param pVmcsInfo The VMCS information structure.
5069 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5070 * @param fIntrState The VT-x guest-interruptibility state.
5071 * @param fStepping Whether we are single-stepping the guest using the
5072 * hypervisor debugger and should return
5073 * VINF_EM_DBG_STEPPED if the event was dispatched
5074 * directly.
5075 */
5076static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5077 uint32_t fIntrState, bool fStepping)
5078{
5079 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5080#ifndef IN_NEM_DARWIN
5081 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5082#endif
5083
5084#ifdef VBOX_STRICT
5085 /*
5086 * Verify guest-interruptibility state.
5087 *
5088 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5089 * since injecting an event may modify the interruptibility state and we must thus always
5090 * use fIntrState.
5091 */
5092 {
5093 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5094 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5095 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5096 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5097 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5098 Assert(!TRPMHasTrap(pVCpu));
5099 NOREF(fBlockMovSS); NOREF(fBlockSti);
5100 }
5101#endif
5102
5103 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5104 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5105 {
5106 /*
5107 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5108 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5109 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5110 *
5111 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5112 */
5113 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5114#ifdef VBOX_STRICT
5115 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5116 {
5117 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5118 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5119 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5120 }
5121 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5122 {
5123 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5124 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5125 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5126 }
5127#endif
5128 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5129 uIntType));
5130
5131 /*
5132 * Inject the event and get any changes to the guest-interruptibility state.
5133 *
5134 * The guest-interruptibility state may need to be updated if we inject the event
5135 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5136 */
5137 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5138 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5139
5140 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5141 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5142 else
5143 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5144 }
5145
5146 /*
5147     * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5148     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5149 */
5150 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5151 && !fIsNestedGuest)
5152 {
5153 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5154
5155 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5156 {
5157 /*
5158 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5159 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5160 */
5161 Assert(!DBGFIsStepping(pVCpu));
5162 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5163 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5164 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5165 AssertRC(rc);
5166 }
5167 else
5168 {
5169 /*
5170 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5171 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5172             * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
5173             * we use MTF, so just make sure it's called before executing guest code.
5174 */
5175 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5176 }
5177 }
5178    /* else: for nested-guests this is currently handled while merging VMCS controls. */
5179
5180 /*
5181 * Finally, update the guest-interruptibility state.
5182 *
5183 * This is required for the real-on-v86 software interrupt injection, for
5184 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5185 */
5186 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5187 AssertRC(rc);
5188
5189 /*
5190 * There's no need to clear the VM-entry interruption-information field here if we're not
5191 * injecting anything. VT-x clears the valid bit on every VM-exit.
5192 *
5193 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5194 */
5195
5196 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5197 return rcStrict;
5198}
5199
5200
5201/**
5202 * Tries to determine what part of the guest-state VT-x has deemed invalid
5203 * and updates the error record fields accordingly.
5204 *
5205 * @returns VMX_IGS_* error codes.
5206 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5207 * wrong with the guest state.
5208 *
5209 * @param pVCpu The cross context virtual CPU structure.
5210 * @param pVmcsInfo The VMCS info. object.
5211 *
5212 * @remarks This function assumes our cache of the VMCS controls
5213 *          is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5214 */
5215static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5216{
5217#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5218#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
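/* Note: the trailing 'else do { } while (0)' lets HMVMX_CHECK_BREAK be used like a regular statement
   (requiring a terminating semicolon) while the 'break' still escapes the enclosing do-while scan loop below. */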
5219
5220 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5221 uint32_t uError = VMX_IGS_ERROR;
5222 uint32_t u32IntrState = 0;
5223#ifndef IN_NEM_DARWIN
5224 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5225 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5226#else
5227 bool const fUnrestrictedGuest = true;
5228#endif
5229 do
5230 {
5231 int rc;
5232
5233 /*
5234 * Guest-interruptibility state.
5235 *
5236         * Read this first so that any check failing before the ones that actually
5237         * require the guest-interruptibility state still reflects the correct
5238         * VMCS value, avoiding further confusion.
5239 */
5240 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5241 AssertRC(rc);
5242
5243 uint32_t u32Val;
5244 uint64_t u64Val;
5245
5246 /*
5247 * CR0.
5248 */
5249 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5250 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5251 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
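        /* IA32_VMX_CR0_FIXED0 holds the CR0 bits that must be 1 and IA32_VMX_CR0_FIXED1 the bits that
           may be 1; the AND above is thus the must-be-set mask and the OR the allowed-to-be-set mask. */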
5252 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5253 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5254 if (fUnrestrictedGuest)
5255 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5256
5257 uint64_t u64GuestCr0;
5258 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5259 AssertRC(rc);
5260 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5261 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5262 if ( !fUnrestrictedGuest
5263 && (u64GuestCr0 & X86_CR0_PG)
5264 && !(u64GuestCr0 & X86_CR0_PE))
5265 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5266
5267 /*
5268 * CR4.
5269 */
5270 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5271 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5272 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5273
5274 uint64_t u64GuestCr4;
5275 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5276 AssertRC(rc);
5277 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5278 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5279
5280 /*
5281 * IA32_DEBUGCTL MSR.
5282 */
5283 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5284 AssertRC(rc);
5285 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5286 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5287 {
5288 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5289 }
5290 uint64_t u64DebugCtlMsr = u64Val;
5291
5292#ifdef VBOX_STRICT
5293 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5294 AssertRC(rc);
5295 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5296#endif
5297 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5298
5299 /*
5300 * RIP and RFLAGS.
5301 */
5302 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5303 AssertRC(rc);
5304 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5305 if ( !fLongModeGuest
5306 || !pCtx->cs.Attr.n.u1Long)
5307 {
5308 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5309 }
5310 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5311 * must be identical if the "IA-32e mode guest" VM-entry
5312 * control is 1 and CS.L is 1. No check applies if the
5313 * CPU supports 64 linear-address bits. */
5314
5315 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5316 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5317 AssertRC(rc);
5318 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5319 VMX_IGS_RFLAGS_RESERVED);
5320 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5321 uint32_t const u32Eflags = u64Val;
5322
5323 if ( fLongModeGuest
5324 || ( fUnrestrictedGuest
5325 && !(u64GuestCr0 & X86_CR0_PE)))
5326 {
5327 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5328 }
5329
5330 uint32_t u32EntryInfo;
5331 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5332 AssertRC(rc);
5333 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5334 {
5335 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5336 }
5337
5338 /*
5339 * 64-bit checks.
5340 */
5341 if (fLongModeGuest)
5342 {
5343 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5344 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5345 }
5346
5347 if ( !fLongModeGuest
5348 && (u64GuestCr4 & X86_CR4_PCIDE))
5349 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5350
5351 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5352 * 51:32 beyond the processor's physical-address width are 0. */
5353
5354 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5355 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5356 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5357
5358#ifndef IN_NEM_DARWIN
5359 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5360 AssertRC(rc);
5361 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5362
5363 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5364 AssertRC(rc);
5365 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5366#endif
5367
5368 /*
5369 * PERF_GLOBAL MSR.
5370 */
5371 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5372 {
5373 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5374 AssertRC(rc);
5375 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5376 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5377 }
5378
5379 /*
5380 * PAT MSR.
5381 */
5382 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5383 {
5384 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5385 AssertRC(rc);
5386 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
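            /* Each of the eight PAT entries occupies one byte; the loop below walks them from PA0 upwards. */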
5387 for (unsigned i = 0; i < 8; i++)
5388 {
5389 uint8_t u8Val = (u64Val & 0xff);
5390 if ( u8Val != 0 /* UC */
5391 && u8Val != 1 /* WC */
5392 && u8Val != 4 /* WT */
5393 && u8Val != 5 /* WP */
5394 && u8Val != 6 /* WB */
5395 && u8Val != 7 /* UC- */)
5396 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5397 u64Val >>= 8;
5398 }
5399 }
5400
5401 /*
5402 * EFER MSR.
5403 */
5404 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5405 {
5406 Assert(g_fHmVmxSupportsVmcsEfer);
5407 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5408 AssertRC(rc);
5409 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5410 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5411 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5412 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5413 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5414 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5415 * iemVmxVmentryCheckGuestState(). */
5416 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5417 || !(u64GuestCr0 & X86_CR0_PG)
5418 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5419 VMX_IGS_EFER_LMA_LME_MISMATCH);
5420 }
5421
5422 /*
5423 * Segment registers.
5424 */
5425 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5426 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5427 if (!(u32Eflags & X86_EFL_VM))
5428 {
5429 /* CS */
5430 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5431 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5432 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5433 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5434 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5435 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5436 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5437 /* CS cannot be loaded with NULL in protected mode. */
5438 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5439 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5440 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5442 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5443 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5444 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5445 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5446 else
5447 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5448
5449 /* SS */
5450 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5451 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5452 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5453 if ( !(pCtx->cr0 & X86_CR0_PE)
5454 || pCtx->cs.Attr.n.u4Type == 3)
5455 {
5456 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5457 }
5458
5459 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5460 {
5461 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5462 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5463 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5464 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5465 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5466 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5467 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5468 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5469 }
5470
5471 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5472 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5473 {
5474 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5475 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5476 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5477 || pCtx->ds.Attr.n.u4Type > 11
5478 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5479 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5480 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5481 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5482 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5483 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5484 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5485 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5486 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5487 }
5488 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5489 {
5490 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5491 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5492 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5493 || pCtx->es.Attr.n.u4Type > 11
5494 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5495 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5496 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5497 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5498 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5499 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5500 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5501 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5502 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5503 }
5504 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5505 {
5506 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5507 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5508 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5509 || pCtx->fs.Attr.n.u4Type > 11
5510 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5511 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5512 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5513 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5514 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5515 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5516 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5517 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5518 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5519 }
5520 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5521 {
5522 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5523 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5524 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5525 || pCtx->gs.Attr.n.u4Type > 11
5526 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5527 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5528 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5529 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5530 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5531 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5532 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5533 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5534 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5535 }
5536 /* 64-bit capable CPUs. */
5537 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5538 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5539 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5540 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5541 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5542 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5543 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5544 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5545 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5546 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5547 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5548 }
5549 else
5550 {
5551 /* V86 mode checks. */
5552 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5553 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5554 {
5555 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5556 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5557 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5558 }
5559 else
5560 {
5561 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5562 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5563 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5564 }
5565
5566 /* CS */
5567 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5568 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5569 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5570 /* SS */
5571 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5572 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5573 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5574 /* DS */
5575 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5576 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5577 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5578 /* ES */
5579 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5580 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5581 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5582 /* FS */
5583 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5584 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5585 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5586 /* GS */
5587 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5588 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5589 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5590 /* 64-bit capable CPUs. */
5591 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5592 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5593 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5594 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5595 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5596 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5597 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5598 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5599 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5600 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5601 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5602 }
5603
5604 /*
5605 * TR.
5606 */
5607 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5608 /* 64-bit capable CPUs. */
5609 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5610 if (fLongModeGuest)
5611 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5612 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5613 else
5614 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5615 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5616 VMX_IGS_TR_ATTR_TYPE_INVALID);
5617 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5618 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5619 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5620 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5621 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5622 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5623 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5624 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5625
5626 /*
5627 * GDTR and IDTR (64-bit capable checks).
5628 */
5629 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5630 AssertRC(rc);
5631 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5632
5633 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5634 AssertRC(rc);
5635 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5636
5637 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5638 AssertRC(rc);
5639 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5640
5641 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5642 AssertRC(rc);
5643 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5644
5645 /*
5646 * Guest Non-Register State.
5647 */
5648 /* Activity State. */
5649 uint32_t u32ActivityState;
5650 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5651 AssertRC(rc);
5652 HMVMX_CHECK_BREAK( !u32ActivityState
5653 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5654 VMX_IGS_ACTIVITY_STATE_INVALID);
5655 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5656 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5657
5658 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5659 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5660 {
5661 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5662 }
5663
5664        /** @todo Activity state and injecting interrupts. Left as a todo since we
5665         *        currently don't use any activity state other than ACTIVE. */
5666
5667 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5668 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5669
5670 /* Guest interruptibility-state. */
5671 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5672 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5673 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5674 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5675 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5676 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5677 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5678 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5679 {
5680 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5681 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5682 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5683 }
5684 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5685 {
5686 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5687 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5688 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5689 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5690 }
5691 /** @todo Assumes the processor is not in SMM. */
5692 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5693 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5694 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5695 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5696 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5697 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5698 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5699 {
5700 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5701 }
5702
5703 /* Pending debug exceptions. */
5704 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5705 AssertRC(rc);
5706 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5707 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5708 u32Val = u64Val; /* For pending debug exceptions checks below. */
5709
5710 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5711 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5712 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5713 {
5714 if ( (u32Eflags & X86_EFL_TF)
5715 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5716 {
5717 /* Bit 14 is PendingDebug.BS. */
5718 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5719 }
5720 if ( !(u32Eflags & X86_EFL_TF)
5721 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5722 {
5723 /* Bit 14 is PendingDebug.BS. */
5724 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5725 }
5726 }
5727
5728#ifndef IN_NEM_DARWIN
5729 /* VMCS link pointer. */
5730 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5731 AssertRC(rc);
5732 if (u64Val != UINT64_C(0xffffffffffffffff))
5733 {
5734 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5735 /** @todo Bits beyond the processor's physical-address width MBZ. */
5736 /** @todo SMM checks. */
5737 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5738 Assert(pVmcsInfo->pvShadowVmcs);
5739 VMXVMCSREVID VmcsRevId;
5740 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5741 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5742 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5743 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5744 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5745 }
5746
5747 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5748 * not using nested paging? */
5749 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5750 && !fLongModeGuest
5751 && CPUMIsGuestInPAEModeEx(pCtx))
5752 {
5753 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5756
5757 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5758 AssertRC(rc);
5759 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5760
5761 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5762 AssertRC(rc);
5763 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5764
5765 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5766 AssertRC(rc);
5767 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5768 }
5769#endif
5770
5771 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5772 if (uError == VMX_IGS_ERROR)
5773 uError = VMX_IGS_REASON_NOT_FOUND;
5774 } while (0);
5775
5776 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5777 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5778 return uError;
5779
5780#undef HMVMX_ERROR_BREAK
5781#undef HMVMX_CHECK_BREAK
5782}
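
/*
 * Compiled-out illustrative sketch of the pending-debug-exception rule enforced by the
 * state checks above: when interrupts are blocked by STI/MOV SS or the activity state is
 * HLT, the BS bit (bit 14) of the pending debug exceptions field must be set exactly when
 * EFLAGS.TF is 1 and IA32_DEBUGCTL.BTF is 0. The helper name is hypothetical.
 */
#if 0
static bool vmxExampleIsPendingDebugBsConsistent(uint32_t fEfl, uint64_t uDebugCtlMsr, uint64_t uPendingDbgXcpts)
{
    bool const fBsExpected = RT_BOOL(fEfl & X86_EFL_TF) && !(uDebugCtlMsr & RT_BIT_64(1)); /* TF set, BTF clear. */
    bool const fBsSet      = RT_BOOL(uPendingDbgXcpts & RT_BIT_64(14));                    /* PendingDebug.BS.   */
    return fBsExpected == fBsSet;
}
#endif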
5783
5784
5785#ifndef HMVMX_USE_FUNCTION_TABLE
5786/**
5787 * Handles a guest VM-exit from hardware-assisted VMX execution.
5788 *
5789 * @returns Strict VBox status code (i.e. informational status codes too).
5790 * @param pVCpu The cross context virtual CPU structure.
5791 * @param pVmxTransient The VMX-transient structure.
5792 */
5793DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5794{
5795#ifdef DEBUG_ramshankar
5796# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5797 do { \
5798 if (a_fSave != 0) \
5799 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5800 VBOXSTRICTRC rcStrict = a_CallExpr; \
5801 if (a_fSave != 0) \
5802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5803 return rcStrict; \
5804 } while (0)
5805#else
5806# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5807#endif
5808 uint32_t const uExitReason = pVmxTransient->uExitReason;
5809 switch (uExitReason)
5810 {
5811 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5812 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5813 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5814 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5815 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5816 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5817 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5818 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5819 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5820 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5821 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5822 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5823 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5824 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5825 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5826 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5827 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5828 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5829 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5830 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5831 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5832 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5833 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5834 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5835 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5836 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5837 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5838 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5839 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5840 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5841#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5842 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5843 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5844 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5845 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5846 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5847 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5848 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5849 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5850 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5851 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5852#else
5853 case VMX_EXIT_VMCLEAR:
5854 case VMX_EXIT_VMLAUNCH:
5855 case VMX_EXIT_VMPTRLD:
5856 case VMX_EXIT_VMPTRST:
5857 case VMX_EXIT_VMREAD:
5858 case VMX_EXIT_VMRESUME:
5859 case VMX_EXIT_VMWRITE:
5860 case VMX_EXIT_VMXOFF:
5861 case VMX_EXIT_VMXON:
5862 case VMX_EXIT_INVVPID:
5863 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5864#endif
5865#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5866 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5867#else
5868 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5869#endif
5870
5871 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5872 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5873 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5874
5875 case VMX_EXIT_INIT_SIGNAL:
5876 case VMX_EXIT_SIPI:
5877 case VMX_EXIT_IO_SMI:
5878 case VMX_EXIT_SMI:
5879 case VMX_EXIT_ERR_MSR_LOAD:
5880 case VMX_EXIT_ERR_MACHINE_CHECK:
5881 case VMX_EXIT_PML_FULL:
5882 case VMX_EXIT_VIRTUALIZED_EOI:
5883 case VMX_EXIT_GDTR_IDTR_ACCESS:
5884 case VMX_EXIT_LDTR_TR_ACCESS:
5885 case VMX_EXIT_APIC_WRITE:
5886 case VMX_EXIT_RDRAND:
5887 case VMX_EXIT_RSM:
5888 case VMX_EXIT_VMFUNC:
5889 case VMX_EXIT_ENCLS:
5890 case VMX_EXIT_RDSEED:
5891 case VMX_EXIT_XSAVES:
5892 case VMX_EXIT_XRSTORS:
5893 case VMX_EXIT_UMWAIT:
5894 case VMX_EXIT_TPAUSE:
5895 case VMX_EXIT_LOADIWKEY:
5896 default:
5897 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5898 }
5899#undef VMEXIT_CALL_RET
5900}
5901#endif /* !HMVMX_USE_FUNCTION_TABLE */
5902
5903
5904#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5905/**
5906 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5907 *
5908 * @returns Strict VBox status code (i.e. informational status codes too).
5909 * @param pVCpu The cross context virtual CPU structure.
5910 * @param pVmxTransient The VMX-transient structure.
5911 */
5912DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5913{
5914 uint32_t const uExitReason = pVmxTransient->uExitReason;
5915 switch (uExitReason)
5916 {
5917# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5918 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5919 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5920# else
5921 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5922 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5923# endif
5924 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5925 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5926 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5927
5928 /*
5929 * We shouldn't direct host physical interrupts to the nested-guest.
5930 */
5931 case VMX_EXIT_EXT_INT:
5932 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5933
5934 /*
5935 * Instructions that cause VM-exits unconditionally, or whose exit condition is
5936 * determined solely by the nested hypervisor (meaning if the VM-exit
5937 * happens, it's guaranteed to be a nested-guest VM-exit).
5938 *
5939 * - Provides VM-exit instruction length ONLY.
5940 */
5941 case VMX_EXIT_CPUID: /* Unconditional. */
5942 case VMX_EXIT_VMCALL:
5943 case VMX_EXIT_GETSEC:
5944 case VMX_EXIT_INVD:
5945 case VMX_EXIT_XSETBV:
5946 case VMX_EXIT_VMLAUNCH:
5947 case VMX_EXIT_VMRESUME:
5948 case VMX_EXIT_VMXOFF:
5949 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5950 case VMX_EXIT_VMFUNC:
5951 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5952
5953 /*
5954 * Instructions that cause VM-exits unconditionally, or whose exit condition is
5955 * determined solely by the nested hypervisor (meaning if the VM-exit
5956 * happens, it's guaranteed to be a nested-guest VM-exit).
5957 *
5958 * - Provides VM-exit instruction length.
5959 * - Provides VM-exit information.
5960 * - Optionally provides Exit qualification.
5961 *
5962 * Since Exit qualification is 0 for all VM-exits where it is not
5963 * applicable, reading and passing it to the guest should produce
5964 * defined behavior.
5965 *
5966 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5967 */
5968 case VMX_EXIT_INVEPT: /* Unconditional. */
5969 case VMX_EXIT_INVVPID:
5970 case VMX_EXIT_VMCLEAR:
5971 case VMX_EXIT_VMPTRLD:
5972 case VMX_EXIT_VMPTRST:
5973 case VMX_EXIT_VMXON:
5974 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5975 case VMX_EXIT_LDTR_TR_ACCESS:
5976 case VMX_EXIT_RDRAND:
5977 case VMX_EXIT_RDSEED:
5978 case VMX_EXIT_XSAVES:
5979 case VMX_EXIT_XRSTORS:
5980 case VMX_EXIT_UMWAIT:
5981 case VMX_EXIT_TPAUSE:
5982 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5983
5984 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5985 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5986 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5987 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5988 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5989 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5990 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5991 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5992 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5993 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5994 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5995 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5996 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5997 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5998 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5999 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
6000 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
6001 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
6002 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
6003
6004 case VMX_EXIT_PREEMPT_TIMER:
6005 {
6006 /** @todo NSTVMX: Preempt timer. */
6007 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
6008 }
6009
6010 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
6011 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
6012
6013 case VMX_EXIT_VMREAD:
6014 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6015
6016 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6017 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6018
6019 case VMX_EXIT_INIT_SIGNAL:
6020 case VMX_EXIT_SIPI:
6021 case VMX_EXIT_IO_SMI:
6022 case VMX_EXIT_SMI:
6023 case VMX_EXIT_ERR_MSR_LOAD:
6024 case VMX_EXIT_ERR_MACHINE_CHECK:
6025 case VMX_EXIT_PML_FULL:
6026 case VMX_EXIT_RSM:
6027 default:
6028 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6029 }
6030}
6031#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6032
6033
6034/** @name VM-exit helpers.
6035 * @{
6036 */
6037/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6038/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6039/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6040
6041/** Macro for VM-exits called unexpectedly. */
6042#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6043 do { \
6044 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6045 return VERR_VMX_UNEXPECTED_EXIT; \
6046 } while (0)
6047
6048#ifdef VBOX_STRICT
6049# ifndef IN_NEM_DARWIN
6050/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6051# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6052 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6053
6054# define HMVMX_ASSERT_PREEMPT_CPUID() \
6055 do { \
6056 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6057 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6058 } while (0)
6059
6060# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6061 do { \
6062 AssertPtr((a_pVCpu)); \
6063 AssertPtr((a_pVmxTransient)); \
6064 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6065 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6066 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6067 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6068 Assert((a_pVmxTransient)->pVmcsInfo); \
6069 Assert(ASMIntAreEnabled()); \
6070 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6071 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6072 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6073 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6074 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6075 HMVMX_ASSERT_PREEMPT_CPUID(); \
6076 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6077 } while (0)
6078# else
6079# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6080# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6081# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6082 do { \
6083 AssertPtr((a_pVCpu)); \
6084 AssertPtr((a_pVmxTransient)); \
6085 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6086 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6087 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6088 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6089 Assert((a_pVmxTransient)->pVmcsInfo); \
6090 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6091 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6092 } while (0)
6093# endif
6094
6095# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6096 do { \
6097 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6098 Assert((a_pVmxTransient)->fIsNestedGuest); \
6099 } while (0)
6100
6101# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6102 do { \
6103 Log4Func(("\n")); \
6104 } while (0)
6105#else
6106# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6107 do { \
6108 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6109 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6110 } while (0)
6111
6112# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6113 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6114
6115# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6116#endif
6117
6118#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6119/** Macro that does the necessary privilege checks and intercepted VM-exits for
6120 * guests that attempted to execute a VMX instruction. */
6121# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6122 do \
6123 { \
6124 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6125 if (rcStrictTmp == VINF_SUCCESS) \
6126 { /* likely */ } \
6127 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6128 { \
6129 Assert((a_pVCpu)->hm.s.Event.fPending); \
6130 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6131 return VINF_SUCCESS; \
6132 } \
6133 else \
6134 { \
6135 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6136 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6137 } \
6138 } while (0)
6139
6140/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6141# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6142 do \
6143 { \
6144 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6145 (a_pGCPtrEffAddr)); \
6146 if (rcStrictTmp == VINF_SUCCESS) \
6147 { /* likely */ } \
6148 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6149 { \
6150 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6151 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6152 NOREF(uXcptTmp); \
6153 return VINF_SUCCESS; \
6154 } \
6155 else \
6156 { \
6157 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6158 return rcStrictTmp; \
6159 } \
6160 } while (0)
6161#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
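
/*
 * Compiled-out usage sketch of the two macros above, showing how a VMX-instruction VM-exit
 * handler typically chains them before handing the instruction to IEM. The handler name is
 * hypothetical and it assumes the exit qualification and instruction information have
 * already been read into the transient structure.
 */
#if 0
static VBOXSTRICTRC vmxExampleExitVmxInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);       /* May raise #UD and return. */
    RTGCPTR GCPtrEffAddr;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                             VMXMEMACCESS_READ, &GCPtrEffAddr);                 /* May raise #GP/#SS and return. */
    /* ... pass GCPtrEffAddr to the corresponding IEMExecDecodedXxx() worker ... */
    RT_NOREF(GCPtrEffAddr);
    return VINF_SUCCESS;
}
#endif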
6162
6163
6164/**
6165 * Advances the guest RIP by the specified number of bytes.
6166 *
6167 * @param pVCpu The cross context virtual CPU structure.
6168 * @param cbInstr Number of bytes to advance the RIP by.
6169 *
6170 * @remarks No-long-jump zone!!!
6171 */
6172DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6173{
6174 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6175
6176 /*
6177 * Advance RIP.
6178 *
6179 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6180 * when the addition causes a "carry" into the upper half and check whether
6181 * we're in 64-bit and can go on with it or whether we should zap the top
6182 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6183 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6184 *
6185 * See PC wrap around tests in bs3-cpu-weird-1.
6186 */
6187 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6188 uint64_t const uRipNext = uRipPrev + cbInstr;
6189 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6190 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6191 pVCpu->cpum.GstCtx.rip = uRipNext;
6192 else
6193 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6194
6195 /*
6196 * Clear RF and interrupt shadowing.
6197 */
6198 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6199 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6200 else
6201 {
6202 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6203 {
6204 /** @todo \#DB - single step. */
6205 }
6206 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6207 }
6208 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6209
6210 /* Mark both RIP and RFLAGS as updated. */
6211 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6212}
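
/*
 * Compiled-out sketch of the carry check above, with a concrete case: a 2-byte instruction
 * at EIP 0xfffffffe in 32-bit code wraps to 0 (upper half zapped), while the same addition
 * for a 64-bit RIP keeps the carry into bit 32. The helper name is hypothetical.
 */
#if 0
static uint64_t vmxExampleAdvanceRip(uint64_t uRipPrev, uint32_t cbInstr, bool fIn64BitCode)
{
    uint64_t const uRipNext = uRipPrev + cbInstr;
    if (   !((uRipNext ^ uRipPrev) & RT_BIT_64(32))  /* No carry into the upper half... */
        || fIn64BitCode)                             /* ...or 64-bit mode: keep the full result. */
        return uRipNext;
    return (uint32_t)uRipNext;                       /* 16-bit/32-bit code: zap the upper half. */
}
#endif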
6213
6214
6215/**
6216 * Advances the guest RIP after reading it from the VMCS.
6217 *
6218 * @returns VBox status code, no informational status codes.
6219 * @param pVCpu The cross context virtual CPU structure.
6220 * @param pVmxTransient The VMX-transient structure.
6221 *
6222 * @remarks No-long-jump zone!!!
6223 */
6224static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6225{
6226 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6227 /** @todo consider template here after checking callers. */
6228 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6229 AssertRCReturn(rc, rc);
6230
6231 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6232 return VINF_SUCCESS;
6233}
6234
6235
6236/**
6237 * Handle a condition that occurred while delivering an event through the guest or
6238 * nested-guest IDT.
6239 *
6240 * @returns Strict VBox status code (i.e. informational status codes too).
6241 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6242 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6243 * to continue execution of the guest which will deliver the \#DF.
6244 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6245 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6246 *
6247 * @param pVCpu The cross context virtual CPU structure.
6248 * @param pVmxTransient The VMX-transient structure.
6249 *
6250 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6251 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6252 * is due to an EPT violation, PML full or SPP-related event.
6253 *
6254 * @remarks No-long-jump zone!!!
6255 */
6256static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6257{
6258 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6259 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6260 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6261 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6262 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6263 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6264
6265 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6266 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6267 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6268 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6269 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6270 {
6271 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6272 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6273
6274 /*
6275 * If the event was a software interrupt (generated with INT n) or a software exception
6276 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6277 * can handle the VM-exit and continue guest execution which will re-execute the
6278 * instruction rather than re-injecting the exception, as that can cause premature
6279 * trips to ring-3 before injection and involve TRPM which currently has no way of
6280 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6281 * the problem).
6282 */
6283 IEMXCPTRAISE enmRaise;
6284 IEMXCPTRAISEINFO fRaiseInfo;
6285 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6286 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6287 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6288 {
6289 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6290 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6291 }
6292 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6293 {
6294 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6295 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6296 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6297
6298 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6299 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6300
6301 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6302
6303 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6304 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6305 {
6306 pVmxTransient->fVectoringPF = true;
6307 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6308 }
6309 }
6310 else
6311 {
6312 /*
6313 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6314 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6315 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6316 */
6317 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6318 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6319 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6320 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6321 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6322 }
6323
6324 /*
6325 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6326 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6327 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6328 * subsequent VM-entry would fail, see @bugref{7445}.
6329 *
6330 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6331 */
6332 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6333 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6334 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6335 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6336 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6337
6338 switch (enmRaise)
6339 {
6340 case IEMXCPTRAISE_CURRENT_XCPT:
6341 {
6342 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6343 Assert(rcStrict == VINF_SUCCESS);
6344 break;
6345 }
6346
6347 case IEMXCPTRAISE_PREV_EVENT:
6348 {
6349 uint32_t u32ErrCode;
6350 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6351 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6352 else
6353 u32ErrCode = 0;
6354
6355 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6356 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6357 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6358 pVCpu->cpum.GstCtx.cr2);
6359
6360 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6361 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6362 Assert(rcStrict == VINF_SUCCESS);
6363 break;
6364 }
6365
6366 case IEMXCPTRAISE_REEXEC_INSTR:
6367 Assert(rcStrict == VINF_SUCCESS);
6368 break;
6369
6370 case IEMXCPTRAISE_DOUBLE_FAULT:
6371 {
6372 /*
6373 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6374 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6375 */
6376 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6377 {
6378 pVmxTransient->fVectoringDoublePF = true;
6379 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6380 pVCpu->cpum.GstCtx.cr2));
6381 rcStrict = VINF_SUCCESS;
6382 }
6383 else
6384 {
6385 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6386 vmxHCSetPendingXcptDF(pVCpu);
6387 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6388 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6389 rcStrict = VINF_HM_DOUBLE_FAULT;
6390 }
6391 break;
6392 }
6393
6394 case IEMXCPTRAISE_TRIPLE_FAULT:
6395 {
6396 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6397 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6398 rcStrict = VINF_EM_RESET;
6399 break;
6400 }
6401
6402 case IEMXCPTRAISE_CPU_HANG:
6403 {
6404 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6405 rcStrict = VERR_EM_GUEST_CPU_HANG;
6406 break;
6407 }
6408
6409 default:
6410 {
6411 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6412 rcStrict = VERR_VMX_IPE_2;
6413 break;
6414 }
6415 }
6416 }
6417 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6418 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6419 {
6420 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6421 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6422 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6423 {
6424 /*
6425 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6426 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6427 * that virtual NMIs remain blocked until the IRET execution is completed.
6428 *
6429 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6430 */
6431 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6432 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6433 }
6434 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6435 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6436 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6437 {
6438 /*
6439 * Execution of IRET caused an EPT violation, page-modification log-full event or
6440 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6441 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6442 * that virtual NMIs remain blocked until the IRET execution is completed.
6443 *
6444 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6445 */
6446 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6447 {
6448 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6449 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6450 }
6451 }
6452 }
6453
6454 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6455 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6456 return rcStrict;
6457}
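
/*
 * Compiled-out sketch of the "NMI unblocking due to IRET" rule re-applied above; it only
 * matters when virtual NMIs are in use and NMI blocking is not already in effect. The
 * helper name is hypothetical.
 */
#if 0
static bool vmxExampleMustReblockNmi(uint32_t uExitIntInfo, uint64_t uExitQual, bool fEptPmlOrSppExit)
{
    if (fEptPmlOrSppExit)                                           /* EPT violation, PML full or SPP event: */
        return VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(uExitQual);    /* consult the Exit qualification.       */
    return VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)                 /* Exception during IRET:                */
        && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF    /* #DF is excluded,                      */
        && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo);     /* otherwise consult the exit int. info. */
}
#endif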
6458
6459
6460#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6461/**
6462 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6463 * guest attempting to execute a VMX instruction.
6464 *
6465 * @returns Strict VBox status code (i.e. informational status codes too).
6466 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6467 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6468 *
6469 * @param pVCpu The cross context virtual CPU structure.
6470 * @param uExitReason The VM-exit reason.
6471 *
6472 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6473 * @remarks No-long-jump zone!!!
6474 */
6475static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6476{
6477 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6478 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6479
6480 /*
6481 * The physical CPU would have already checked the CPU mode/code segment.
6482 * We shall just assert here for paranoia.
6483 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6484 */
6485 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6486 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6487 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6488
6489 if (uExitReason == VMX_EXIT_VMXON)
6490 {
6491 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6492
6493 /*
6494 * We check CR4.VMXE because it is required to be always set while in VMX operation
6495 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6496 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6497 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6498 */
6499 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6500 {
6501 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6502 vmxHCSetPendingXcptUD(pVCpu);
6503 return VINF_HM_PENDING_XCPT;
6504 }
6505 }
6506 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6507 {
6508 /*
6509 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6510 * (other than VMXON), we need to raise a #UD.
6511 */
6512 Log4Func(("Not in VMX root mode -> #UD\n"));
6513 vmxHCSetPendingXcptUD(pVCpu);
6514 return VINF_HM_PENDING_XCPT;
6515 }
6516
6517 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6518 return VINF_SUCCESS;
6519}
6520
6521
6522/**
6523 * Decodes the memory operand of an instruction that caused a VM-exit.
6524 *
6525 * The Exit qualification field provides the displacement field for memory
6526 * operand instructions, if any.
6527 *
6528 * @returns Strict VBox status code (i.e. informational status codes too).
6529 * @retval VINF_SUCCESS if the operand was successfully decoded.
6530 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6531 * operand.
6532 * @param pVCpu The cross context virtual CPU structure.
6533 * @param uExitInstrInfo The VM-exit instruction information field.
6534 * @param enmMemAccess The memory operand's access type (read or write).
6535 * @param GCPtrDisp The instruction displacement field, if any. For
6536 * RIP-relative addressing pass RIP + displacement here.
6537 * @param pGCPtrMem Where to store the effective destination memory address.
6538 *
6539 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6540 * virtual-8086 mode hence skips those checks while verifying if the
6541 * segment is valid.
6542 */
6543static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6544 PRTGCPTR pGCPtrMem)
6545{
6546 Assert(pGCPtrMem);
6547 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6548 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6549 | CPUMCTX_EXTRN_CR0);
6550
6551 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6552 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6553 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6554
6555 VMXEXITINSTRINFO ExitInstrInfo;
6556 ExitInstrInfo.u = uExitInstrInfo;
6557 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6558 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6559 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6560 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6561 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6562 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6563 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6564 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6565 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6566
6567 /*
6568 * Validate instruction information.
6569 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6570 */
6571 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6572 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6573 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6574 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6575 AssertLogRelMsgReturn(fIsMemOperand,
6576 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6577
6578 /*
6579 * Compute the complete effective address.
6580 *
6581 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6582 * See AMD spec. 4.5.2 "Segment Registers".
6583 */
6584 RTGCPTR GCPtrMem = GCPtrDisp;
6585 if (fBaseRegValid)
6586 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6587 if (fIdxRegValid)
6588 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6589
6590 RTGCPTR const GCPtrOff = GCPtrMem;
6591 if ( !fIsLongMode
6592 || iSegReg >= X86_SREG_FS)
6593 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6594 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6595
6596 /*
6597 * Validate effective address.
6598 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6599 */
6600 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6601 Assert(cbAccess > 0);
6602 if (fIsLongMode)
6603 {
6604 if (X86_IS_CANONICAL(GCPtrMem))
6605 {
6606 *pGCPtrMem = GCPtrMem;
6607 return VINF_SUCCESS;
6608 }
6609
6610 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6611 * "Data Limit Checks in 64-bit Mode". */
6612 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6613 vmxHCSetPendingXcptGP(pVCpu, 0);
6614 return VINF_HM_PENDING_XCPT;
6615 }
6616
6617 /*
6618 * This is a watered down version of iemMemApplySegment().
6619 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6620 * and segment CPL/DPL checks are skipped.
6621 */
6622 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6623 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6624 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6625
6626 /* Check if the segment is present and usable. */
6627 if ( pSel->Attr.n.u1Present
6628 && !pSel->Attr.n.u1Unusable)
6629 {
6630 Assert(pSel->Attr.n.u1DescType);
6631 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6632 {
6633 /* Check permissions for the data segment. */
6634 if ( enmMemAccess == VMXMEMACCESS_WRITE
6635 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6636 {
6637 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6638 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6639 return VINF_HM_PENDING_XCPT;
6640 }
6641
6642 /* Check limits if it's a normal data segment. */
6643 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6644 {
6645 if ( GCPtrFirst32 > pSel->u32Limit
6646 || GCPtrLast32 > pSel->u32Limit)
6647 {
6648 Log4Func(("Data segment limit exceeded. "
6649 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6650 GCPtrLast32, pSel->u32Limit));
6651 if (iSegReg == X86_SREG_SS)
6652 vmxHCSetPendingXcptSS(pVCpu, 0);
6653 else
6654 vmxHCSetPendingXcptGP(pVCpu, 0);
6655 return VINF_HM_PENDING_XCPT;
6656 }
6657 }
6658 else
6659 {
6660 /* Check limits if it's an expand-down data segment.
6661 Note! The upper boundary is defined by the B bit, not the G bit! */
6662 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6663 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6664 {
6665 Log4Func(("Expand-down data segment limit exceeded. "
6666 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6667 GCPtrLast32, pSel->u32Limit));
6668 if (iSegReg == X86_SREG_SS)
6669 vmxHCSetPendingXcptSS(pVCpu, 0);
6670 else
6671 vmxHCSetPendingXcptGP(pVCpu, 0);
6672 return VINF_HM_PENDING_XCPT;
6673 }
6674 }
6675 }
6676 else
6677 {
6678 /* Check permissions for the code segment. */
6679 if ( enmMemAccess == VMXMEMACCESS_WRITE
6680 || ( enmMemAccess == VMXMEMACCESS_READ
6681 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6682 {
6683 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6684 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6685 vmxHCSetPendingXcptGP(pVCpu, 0);
6686 return VINF_HM_PENDING_XCPT;
6687 }
6688
6689 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6690 if ( GCPtrFirst32 > pSel->u32Limit
6691 || GCPtrLast32 > pSel->u32Limit)
6692 {
6693 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6694 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6695 if (iSegReg == X86_SREG_SS)
6696 vmxHCSetPendingXcptSS(pVCpu, 0);
6697 else
6698 vmxHCSetPendingXcptGP(pVCpu, 0);
6699 return VINF_HM_PENDING_XCPT;
6700 }
6701 }
6702 }
6703 else
6704 {
6705 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6706 vmxHCSetPendingXcptGP(pVCpu, 0);
6707 return VINF_HM_PENDING_XCPT;
6708 }
6709
6710 *pGCPtrMem = GCPtrMem;
6711 return VINF_SUCCESS;
6712}
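
/*
 * Compiled-out sketch of the effective-address composition performed above for a memory
 * operand of the form [seg: base + index * 2^scale + disp]. All names are hypothetical;
 * the segment base is only added outside long mode or for FS/GS.
 */
#if 0
static RTGCPTR vmxExampleComposeEffAddr(RTGCPTR GCPtrDisp, uint64_t uBase, uint64_t uIndex, uint8_t uScale,
                                        uint64_t uSegBase, bool fApplySegBase, uint64_t fAddrSizeMask)
{
    RTGCPTR GCPtrMem = GCPtrDisp + uBase + (uIndex << uScale);  /* Displacement + base + scaled index. */
    if (fApplySegBase)                                          /* Outside long mode, or FS/GS in long mode. */
        GCPtrMem += uSegBase;
    return GCPtrMem & fAddrSizeMask;                            /* Truncate to the 16/32/64-bit address size. */
}
#endif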
6713#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6714
6715
6716/**
6717 * VM-exit helper for LMSW.
6718 */
6719static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6720{
6721 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6722 AssertRCReturn(rc, rc);
6723
6724 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6725 AssertMsg( rcStrict == VINF_SUCCESS
6726 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6727
6728 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6729 if (rcStrict == VINF_IEM_RAISED_XCPT)
6730 {
6731 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6732 rcStrict = VINF_SUCCESS;
6733 }
6734
6735 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6736 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6737 return rcStrict;
6738}
6739
6740
6741/**
6742 * VM-exit helper for CLTS.
6743 */
6744static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6745{
6746 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6747 AssertRCReturn(rc, rc);
6748
6749 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6750 AssertMsg( rcStrict == VINF_SUCCESS
6751 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6752
6753 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6754 if (rcStrict == VINF_IEM_RAISED_XCPT)
6755 {
6756 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6757 rcStrict = VINF_SUCCESS;
6758 }
6759
6760 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6761 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6762 return rcStrict;
6763}
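
/*
 * Compiled-out sketch of the pattern shared by the LMSW/CLTS/CRx helpers in this group: run
 * the decoded instruction through IEM, and if IEM raised an exception fold the status back
 * to VINF_SUCCESS while marking the exception-related context bits dirty. The helper name is
 * hypothetical.
 */
#if 0
static VBOXSTRICTRC vmxExampleFoldIemRaisedXcpt(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
#endif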
6764
6765
6766/**
6767 * VM-exit helper for MOV from CRx (CRx read).
6768 */
6769static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6770{
6771 Assert(iCrReg < 16);
6772 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6773
6774 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6775 AssertRCReturn(rc, rc);
6776
6777 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6778 AssertMsg( rcStrict == VINF_SUCCESS
6779 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6780
6781 if (iGReg == X86_GREG_xSP)
6782 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6783 else
6784 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6785#ifdef VBOX_WITH_STATISTICS
6786 switch (iCrReg)
6787 {
6788 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6789 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6790 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6791 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6792 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6793 }
6794#endif
6795 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6796 return rcStrict;
6797}
6798
6799
6800/**
6801 * VM-exit helper for MOV to CRx (CRx write).
6802 */
6803static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6804{
6805 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6806
6807 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6808 AssertMsg( rcStrict == VINF_SUCCESS
6809 || rcStrict == VINF_IEM_RAISED_XCPT
6810 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6811
6812 switch (iCrReg)
6813 {
6814 case 0:
6815 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6816 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6817 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6818 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6819 break;
6820
6821 case 2:
6822 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6823 /* Nothing to do here, CR2 is not part of the VMCS. */
6824 break;
6825
6826 case 3:
6827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6828 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6829 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6830 break;
6831
6832 case 4:
6833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6834 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6835#ifndef IN_NEM_DARWIN
6836 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6837 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6838#else
6839 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6840#endif
6841 break;
6842
6843 case 8:
6844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6845 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6846 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6847 break;
6848
6849 default:
6850 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6851 break;
6852 }
6853
6854 if (rcStrict == VINF_IEM_RAISED_XCPT)
6855 {
6856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6857 rcStrict = VINF_SUCCESS;
6858 }
6859 return rcStrict;
6860}
6861
6862
6863/**
6864 * VM-exit exception handler for \#PF (Page-fault exception).
6865 *
6866 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6867 */
6868static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6869{
6870 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6871 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6872
6873#ifndef IN_NEM_DARWIN
6874 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6875 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6876 { /* likely */ }
6877 else
6878#endif
6879 {
6880#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6881 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6882#endif
6883 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6884 if (!pVmxTransient->fVectoringDoublePF)
6885 {
6886 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6887 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6888 }
6889 else
6890 {
6891 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6892 Assert(!pVmxTransient->fIsNestedGuest);
6893 vmxHCSetPendingXcptDF(pVCpu);
6894 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6895 }
6896 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6897 return VINF_SUCCESS;
6898 }
6899
6900 Assert(!pVmxTransient->fIsNestedGuest);
6901
6902 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6903 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6904 if (pVmxTransient->fVectoringPF)
6905 {
6906 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6907 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6908 }
6909
6910 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6911 AssertRCReturn(rc, rc);
6912
6913 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6914 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6915
6916 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6917 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6918
6919 Log4Func(("#PF: rc=%Rrc\n", rc));
6920 if (rc == VINF_SUCCESS)
6921 {
6922 /*
6923 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6924 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6925 */
6926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6927 TRPMResetTrap(pVCpu);
6928 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6929 return rc;
6930 }
6931
6932 if (rc == VINF_EM_RAW_GUEST_TRAP)
6933 {
6934 if (!pVmxTransient->fVectoringDoublePF)
6935 {
6936 /* It's a guest page fault and needs to be reflected to the guest. */
6937 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6938 TRPMResetTrap(pVCpu);
6939 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6940 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6941 uGstErrorCode, pVmxTransient->uExitQual);
6942 }
6943 else
6944 {
6945 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6946 TRPMResetTrap(pVCpu);
6947 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6948 vmxHCSetPendingXcptDF(pVCpu);
6949 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6950 }
6951
6952 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6953 return VINF_SUCCESS;
6954 }
6955
6956 TRPMResetTrap(pVCpu);
6957 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6958 return rc;
6959}
6960
6961
6962/**
6963 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6964 *
6965 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6966 */
6967static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6968{
6969 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6970 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6971
6972 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6973 AssertRCReturn(rc, rc);
6974
6975 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6976 {
6977 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6978 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6979
6980 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6981 * provides VM-exit instruction length. If this causes problems later,
6982 * disassemble the instruction like it's done on AMD-V. */
6983 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6984 AssertRCReturn(rc2, rc2);
6985 return rc;
6986 }
6987
6988 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6989 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6990 return VINF_SUCCESS;
6991}
6992
6993
6994/**
6995 * VM-exit exception handler for \#BP (Breakpoint exception).
6996 *
6997 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6998 */
6999static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7000{
7001 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7002 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7003
7004 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7005 AssertRCReturn(rc, rc);
7006
7007 VBOXSTRICTRC rcStrict;
7008 if (!pVmxTransient->fIsNestedGuest)
7009 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7010 else
7011 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7012
7013 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7014 {
7015 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7016 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7017 rcStrict = VINF_SUCCESS;
7018 }
7019
7020 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7021 return rcStrict;
7022}
7023
7024
7025/**
7026 * VM-exit exception handler for \#AC (Alignment-check exception).
7027 *
7028 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7029 */
7030static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7031{
7032 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7033
7034 /*
7035 * Detect #ACs caused by host having enabled split-lock detection.
7036 * Emulate such instructions.
7037 */
7038#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7039 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7040 AssertRCReturn(rc, rc);
7041 /** @todo detect split lock in cpu feature? */
7042 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7043 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7044 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7045 || CPUMGetGuestCPL(pVCpu) != 3
7046 /* 3. When EFLAGS.AC == 0 this can only be a split-lock case. */
7047 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7048 {
7049 /*
7050 * Check for debug/trace events and import state accordingly.
7051 */
7052 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7053 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7054 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7055#ifndef IN_NEM_DARWIN
7056 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7057#endif
7058 )
7059 {
7060 if (pVM->cCpus == 1)
7061 {
7062#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7063 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7064 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7065#else
7066 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7067 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7068#endif
7069 AssertRCReturn(rc, rc);
7070 }
7071 }
7072 else
7073 {
7074 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7075 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7076 AssertRCReturn(rc, rc);
7077
7078 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7079
7080 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7081 {
7082 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7083 if (rcStrict != VINF_SUCCESS)
7084 return rcStrict;
7085 }
7086 }
7087
7088 /*
7089 * Emulate the instruction.
7090 *
7091 * We have to ignore the LOCK prefix here as we must not retrigger the
7092 * detection on the host. This isn't all that satisfactory, though...
7093 */
7094 if (pVM->cCpus == 1)
7095 {
7096 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7097 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7098
7099 /** @todo For SMP configs we should do a rendezvous here. */
7100 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7101 if (rcStrict == VINF_SUCCESS)
7102#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7104 HM_CHANGED_GUEST_RIP
7105 | HM_CHANGED_GUEST_RFLAGS
7106 | HM_CHANGED_GUEST_GPRS_MASK
7107 | HM_CHANGED_GUEST_CS
7108 | HM_CHANGED_GUEST_SS);
7109#else
7110 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7111#endif
7112 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7113 {
7114 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7115 rcStrict = VINF_SUCCESS;
7116 }
7117 return rcStrict;
7118 }
7119 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7120 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7121 return VINF_EM_EMULATE_SPLIT_LOCK;
7122 }
7123
7124 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7125 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7126 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7127
7128 /* Re-inject it. We'll detect any nesting before getting here. */
7129 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7130 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7131 return VINF_SUCCESS;
7132}
7133
7134
7135/**
7136 * VM-exit exception handler for \#DB (Debug exception).
7137 *
7138 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7139 */
7140static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7141{
7142 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7143 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7144
7145 /*
7146 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7147 */
7148 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7149
7150 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
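    /* The Exit qualification uses the DR6 bit layout: bits 0-3 correspond to B0-B3
       (breakpoint conditions met), bit 13 to BD (debug-register access detected) and
       bit 14 to BS (single step), which is why it can simply be OR'ed into DR6 below. */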
7151 uint64_t const uDR6 = X86_DR6_INIT_VAL
7152 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7153 | X86_DR6_BD | X86_DR6_BS));
7154 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7155
7156 int rc;
7157 if (!pVmxTransient->fIsNestedGuest)
7158 {
7159 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7160
7161 /*
7162 * Prevents stepping twice over the same instruction when the guest is stepping using
7163 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7164 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7165 */
7166 if ( rc == VINF_EM_DBG_STEPPED
7167 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7168 {
7169 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7170 rc = VINF_EM_RAW_GUEST_TRAP;
7171 }
7172 }
7173 else
7174 rc = VINF_EM_RAW_GUEST_TRAP;
7175 Log6Func(("rc=%Rrc\n", rc));
7176 if (rc == VINF_EM_RAW_GUEST_TRAP)
7177 {
7178 /*
7179 * The exception was for the guest. Update DR6, DR7.GD and
7180 * IA32_DEBUGCTL.LBR before forwarding it.
7181 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7182 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7183 */
7184#ifndef IN_NEM_DARWIN
7185 VMMRZCallRing3Disable(pVCpu);
7186 HM_DISABLE_PREEMPT(pVCpu);
7187
7188 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7189 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7190 if (CPUMIsGuestDebugStateActive(pVCpu))
7191 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7192
7193 HM_RESTORE_PREEMPT();
7194 VMMRZCallRing3Enable(pVCpu);
7195#else
7196 /** @todo */
7197#endif
7198
7199 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7200 AssertRCReturn(rc, rc);
7201
7202 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7203 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7204
7205 /* Paranoia. */
7206 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7207 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7208
7209 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7210 AssertRC(rc);
7211
7212 /*
7213 * Raise #DB in the guest.
7214 *
7215 * It is important to reflect exactly what the VM-exit gave us (preserving the
7216 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7217 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7218 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7219 *
7220     * Intel re-documented ICEBP/INT1 in May 2018; previously it was only documented as
7221     * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7222 */
7223 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7224 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7225 return VINF_SUCCESS;
7226 }
7227
7228 /*
7229 * Not a guest trap, must be a hypervisor related debug event then.
7230 * Update DR6 in case someone is interested in it.
7231 */
7232 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7233 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7234 CPUMSetHyperDR6(pVCpu, uDR6);
7235
7236 return rc;
7237}
7238
7239
7240/**
7241 * Hacks its way around the lovely mesa driver's backdoor accesses.
7242 *
7243 * @sa hmR0SvmHandleMesaDrvGp.
7244 */
7245static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7246{
7247 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7248 RT_NOREF(pCtx);
7249
7250 /* For now we'll just skip the instruction. */
7251 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7252}
7253
7254
7255/**
7256 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7257 * backdoor logging w/o checking what it is running inside.
7258 *
7259 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7260 * backdoor port and magic numbers loaded in registers.
7261 *
7262 * @returns true if it is, false if it isn't.
7263 * @sa hmR0SvmIsMesaDrvGp.
7264 */
7265DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7266{
7267 /* 0xed: IN eAX,dx */
7268 uint8_t abInstr[1];
7269 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7270 return false;
7271
7272 /* Check that it is #GP(0). */
7273 if (pVmxTransient->uExitIntErrorCode != 0)
7274 return false;
7275
7276 /* Check magic and port. */
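    /* Note: 0x564d5868 is the VMware backdoor magic ('VMXh' in EAX) and 0x5658 ('VX')
       is the backdoor I/O port the Mesa driver uses for its logging. */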
7277 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7278 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7279 if (pCtx->rax != UINT32_C(0x564d5868))
7280 return false;
7281 if (pCtx->dx != UINT32_C(0x5658))
7282 return false;
7283
7284 /* Flat ring-3 CS. */
7285 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7286 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7287 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7288 if (pCtx->cs.Attr.n.u2Dpl != 3)
7289 return false;
7290 if (pCtx->cs.u64Base != 0)
7291 return false;
7292
7293 /* Check opcode. */
7294 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7295 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7296 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7297 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7298 if (RT_FAILURE(rc))
7299 return false;
7300 if (abInstr[0] != 0xed)
7301 return false;
7302
7303 return true;
7304}
7305
7306
7307/**
7308 * VM-exit exception handler for \#GP (General-protection exception).
7309 *
7310 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7311 */
7312static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7313{
7314 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7315 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7316
7317 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7318 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7319#ifndef IN_NEM_DARWIN
7320 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7321 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7322 { /* likely */ }
7323 else
7324#endif
7325 {
7326#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7327# ifndef IN_NEM_DARWIN
7328 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7329# else
7330 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7331# endif
7332#endif
7333 /*
7334 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7335 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7336 */
7337 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7338 AssertRCReturn(rc, rc);
7339 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7340 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7341
7342 if ( pVmxTransient->fIsNestedGuest
7343 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7344 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7345 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7346 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7347 else
7348 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7349 return rc;
7350 }
7351
7352#ifndef IN_NEM_DARWIN
7353 Assert(CPUMIsGuestInRealModeEx(pCtx));
7354 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7355 Assert(!pVmxTransient->fIsNestedGuest);
7356
7357 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7358 AssertRCReturn(rc, rc);
7359
7360 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7361 if (rcStrict == VINF_SUCCESS)
7362 {
7363 if (!CPUMIsGuestInRealModeEx(pCtx))
7364 {
7365 /*
7366 * The guest is no longer in real-mode, check if we can continue executing the
7367 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7368 */
7369 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7370 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7371 {
7372 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7373 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7374 }
7375 else
7376 {
7377 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7378 rcStrict = VINF_EM_RESCHEDULE;
7379 }
7380 }
7381 else
7382 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7383 }
7384 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7385 {
7386 rcStrict = VINF_SUCCESS;
7387 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7388 }
7389 return VBOXSTRICTRC_VAL(rcStrict);
7390#endif
7391}
7392
7393
7394/**
7395 * VM-exit exception handler for \#DE (Divide Error).
7396 *
7397 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7398 */
7399static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7400{
7401 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7402 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7403
7404 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7405 AssertRCReturn(rc, rc);
7406
7407 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7408 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7409 {
7410 uint8_t cbInstr = 0;
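        /* Give the Guest Compatibility Manager (GCM) a chance to work around the #DE by
           patching the guest register context; VERR_NOT_FOUND means no fixup applies and
           the exception is delivered to the guest as-is. */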
7411 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7412 if (rc2 == VINF_SUCCESS)
7413 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7414 else if (rc2 == VERR_NOT_FOUND)
7415 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7416 else
7417 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7418 }
7419 else
7420 rcStrict = VINF_SUCCESS; /* Do nothing. */
7421
7422 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7423 if (RT_FAILURE(rcStrict))
7424 {
7425 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7426 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7427 rcStrict = VINF_SUCCESS;
7428 }
7429
7430 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7431 return VBOXSTRICTRC_VAL(rcStrict);
7432}
7433
7434
7435/**
7436 * VM-exit exception handler wrapper for all other exceptions that are not handled
7437 * by a specific handler.
7438 *
7439 * This simply re-injects the exception back into the VM without any special
7440 * processing.
7441 *
7442 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7443 */
7444static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7445{
7446 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7447
7448#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7449# ifndef IN_NEM_DARWIN
7450 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7451 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7452 ("uVector=%#x u32XcptBitmap=%#X32\n",
7453 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7454 NOREF(pVmcsInfo);
7455# endif
7456#endif
7457
7458 /*
7459 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7460 * would have been handled while checking exits due to event delivery.
7461 */
7462 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7463
7464#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7465 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7466 AssertRCReturn(rc, rc);
7467 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7468#endif
7469
7470#ifdef VBOX_WITH_STATISTICS
7471 switch (uVector)
7472 {
7473 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7474 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7475 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7476 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7477 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7478 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7479        case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNM); break;
7480 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7481 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7482 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7483 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7484 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7485 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7486 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7487 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7488 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7489 default:
7490 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7491 break;
7492 }
7493#endif
7494
7495 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7496 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7497 NOREF(uVector);
7498
7499 /* Re-inject the original exception into the guest. */
7500 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7501 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7502 return VINF_SUCCESS;
7503}
7504
7505
7506/**
7507 * VM-exit exception handler for all exceptions (except NMIs!).
7508 *
7509 * @remarks This may be called for both guests and nested-guests. Take care to not
7510 * make assumptions and avoid doing anything that is not relevant when
7511 * executing a nested-guest (e.g., Mesa driver hacks).
7512 */
7513static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7514{
7515 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7516
7517 /*
7518 * If this VM-exit occurred while delivering an event through the guest IDT, take
7519 * action based on the return code and additional hints (e.g. for page-faults)
7520 * that will be updated in the VMX transient structure.
7521 */
7522 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7523 if (rcStrict == VINF_SUCCESS)
7524 {
7525 /*
7526 * If an exception caused a VM-exit due to delivery of an event, the original
7527 * event may have to be re-injected into the guest. We shall reinject it and
7528 * continue guest execution. However, page-fault is a complicated case and
7529 * needs additional processing done in vmxHCExitXcptPF().
7530 */
7531 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7532 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7533 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7534 || uVector == X86_XCPT_PF)
7535 {
7536 switch (uVector)
7537 {
7538 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7539 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7540 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7541 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7542 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7543 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7544 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7545 default:
7546 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7547 }
7548 }
7549 /* else: inject pending event before resuming guest execution. */
7550 }
7551 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7552 {
7553 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7554 rcStrict = VINF_SUCCESS;
7555 }
7556
7557 return rcStrict;
7558}
7559/** @} */
7560
7561
7562/** @name VM-exit handlers.
7563 * @{
7564 */
7565/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7566/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7567/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7568
7569/**
7570 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7571 */
7572HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7573{
7574 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7575 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7576
7577#ifndef IN_NEM_DARWIN
7578 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7579 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7580 return VINF_SUCCESS;
7581 return VINF_EM_RAW_INTERRUPT;
7582#else
7583 return VINF_SUCCESS;
7584#endif
7585}
7586
7587
7588/**
7589 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7590 * VM-exit.
7591 */
7592HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7593{
7594 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7595 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7596
7597 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7598
7599 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7600 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7601 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7602
7603 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7604 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7605 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7606 NOREF(pVmcsInfo);
7607
7608 VBOXSTRICTRC rcStrict;
7609 switch (uExitIntType)
7610 {
7611#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7612 /*
7613 * Host physical NMIs:
7614 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7615 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7616 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7617 *
7618 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7619 * See Intel spec. 27.5.5 "Updating Non-Register State".
7620 */
7621 case VMX_EXIT_INT_INFO_TYPE_NMI:
7622 {
7623 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7624 break;
7625 }
7626#endif
7627
7628 /*
7629 * Privileged software exceptions (#DB from ICEBP),
7630 * Software exceptions (#BP and #OF),
7631 * Hardware exceptions:
7632 * Process the required exceptions and resume guest execution if possible.
7633 */
7634 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7635 Assert(uVector == X86_XCPT_DB);
7636 RT_FALL_THRU();
7637 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7638 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7639 RT_FALL_THRU();
7640 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7641 {
7642 NOREF(uVector);
7643 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7644 | HMVMX_READ_EXIT_INSTR_LEN
7645 | HMVMX_READ_IDT_VECTORING_INFO
7646 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7647 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7648 break;
7649 }
7650
7651 default:
7652 {
7653 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7654 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7655 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7656 break;
7657 }
7658 }
7659
7660 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7661 return rcStrict;
7662}
7663
7664
7665/**
7666 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7667 */
7668HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7669{
7670 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7671
7672    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7673 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7674 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7675
7676 /* Evaluate and deliver pending events and resume guest execution. */
7677 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7678 return VINF_SUCCESS;
7679}
7680
7681
7682/**
7683 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7684 */
7685HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7686{
7687 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7688
7689 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7690 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7691 {
7692 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7693 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7694 }
7695
7696 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7697
7698 /*
7699 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7700 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7701 */
7702 uint32_t fIntrState;
7703 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7704 AssertRC(rc);
7705 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7706 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7707 {
7708 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7709
7710 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7711 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7712 AssertRC(rc);
7713 }
7714
7715    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7716 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7717
7718 /* Evaluate and deliver pending events and resume guest execution. */
7719 return VINF_SUCCESS;
7720}
7721
7722
7723/**
7724 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7725 */
7726HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7727{
7728 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7729 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7730}
7731
7732
7733/**
7734 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7735 */
7736HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7737{
7738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7739 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7740}
7741
7742
7743/**
7744 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7745 */
7746HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7747{
7748 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7749
7750 /*
7751 * Get the state we need and update the exit history entry.
7752 */
7753 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7754 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7755 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7756 AssertRCReturn(rc, rc);
7757
7758 VBOXSTRICTRC rcStrict;
7759 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7760 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7761 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7762 if (!pExitRec)
7763 {
7764 /*
7765 * Regular CPUID instruction execution.
7766 */
7767 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7768 if (rcStrict == VINF_SUCCESS)
7769 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7770 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7771 {
7772 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7773 rcStrict = VINF_SUCCESS;
7774 }
7775 }
7776 else
7777 {
7778 /*
7779 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7780 */
7781 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7782 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7783 AssertRCReturn(rc2, rc2);
7784
7785 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7786 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7787
7788 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7789 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7790
7791 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7792 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7793 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7794 }
7795 return rcStrict;
7796}
7797
7798
7799/**
7800 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7801 */
7802HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7803{
7804 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7805
7806 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7807 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7808 AssertRCReturn(rc, rc);
7809
7810 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7811 return VINF_EM_RAW_EMULATE_INSTR;
7812
7813 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7814 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7815}
7816
7817
7818/**
7819 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7820 */
7821HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7822{
7823 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7824
7825 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7826 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7827 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7828 AssertRCReturn(rc, rc);
7829
7830 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7831 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7832 {
7833 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7834 we must reset offsetting on VM-entry. See @bugref{6634}. */
7835 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7836 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7837 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7838 }
7839 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7840 {
7841 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7842 rcStrict = VINF_SUCCESS;
7843 }
7844 return rcStrict;
7845}
7846
7847
7848/**
7849 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7850 */
7851HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7852{
7853 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7854
7855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7856 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
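    /* Unlike RDTSC, RDTSCP also returns IA32_TSC_AUX (in ECX), hence the extra
       CPUMCTX_EXTRN_TSC_AUX in the guest-state import below. */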
7857 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7858 AssertRCReturn(rc, rc);
7859
7860 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7861 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7862 {
7863 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7864 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7865 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7866 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7868 }
7869 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7870 {
7871 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7872 rcStrict = VINF_SUCCESS;
7873 }
7874 return rcStrict;
7875}
7876
7877
7878/**
7879 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7880 */
7881HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7882{
7883 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7884
7885 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7886 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7887 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7888 AssertRCReturn(rc, rc);
7889
7890 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7891 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7893 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7894 {
7895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7896 rcStrict = VINF_SUCCESS;
7897 }
7898 return rcStrict;
7899}
7900
7901
7902/**
7903 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7904 */
7905HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7906{
7907 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7908
7909 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7910 if (EMAreHypercallInstructionsEnabled(pVCpu))
7911 {
7912 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7913 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7914 | CPUMCTX_EXTRN_RFLAGS
7915 | CPUMCTX_EXTRN_CR0
7916 | CPUMCTX_EXTRN_SS
7917 | CPUMCTX_EXTRN_CS
7918 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7919 AssertRCReturn(rc, rc);
7920
7921 /* Perform the hypercall. */
7922 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7923 if (rcStrict == VINF_SUCCESS)
7924 {
7925 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7926 AssertRCReturn(rc, rc);
7927 }
7928 else
7929 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7930 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7931 || RT_FAILURE(rcStrict));
7932
7933 /* If the hypercall changes anything other than guest's general-purpose registers,
7934 we would need to reload the guest changed bits here before VM-entry. */
7935 }
7936 else
7937 Log4Func(("Hypercalls not enabled\n"));
7938
7939 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7940 if (RT_FAILURE(rcStrict))
7941 {
7942 vmxHCSetPendingXcptUD(pVCpu);
7943 rcStrict = VINF_SUCCESS;
7944 }
7945
7946 return rcStrict;
7947}
7948
7949
7950/**
7951 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7952 */
7953HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7954{
7955 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7956#ifndef IN_NEM_DARWIN
7957 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7958#endif
7959
7960 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7961 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7962 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7963 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7964 AssertRCReturn(rc, rc);
7965
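    /* For INVLPG VM-exits the Exit qualification holds the linear-address operand of the instruction. */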
7966 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7967
7968 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7969 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7970 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7971 {
7972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7973 rcStrict = VINF_SUCCESS;
7974 }
7975 else
7976 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7977 VBOXSTRICTRC_VAL(rcStrict)));
7978 return rcStrict;
7979}
7980
7981
7982/**
7983 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7984 */
7985HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7986{
7987 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7988
7989 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7990 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7991 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7992 AssertRCReturn(rc, rc);
7993
7994 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7995 if (rcStrict == VINF_SUCCESS)
7996 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7997 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7998 {
7999 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8000 rcStrict = VINF_SUCCESS;
8001 }
8002
8003 return rcStrict;
8004}
8005
8006
8007/**
8008 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8009 */
8010HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8011{
8012 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8013
8014 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8015 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8016 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8017 AssertRCReturn(rc, rc);
8018
8019 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8020 if (RT_SUCCESS(rcStrict))
8021 {
8022 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8023 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8024 rcStrict = VINF_SUCCESS;
8025 }
8026
8027 return rcStrict;
8028}
8029
8030
8031/**
8032 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8033 * VM-exit.
8034 */
8035HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8036{
8037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8038 return VINF_EM_RESET;
8039}
8040
8041
8042/**
8043 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8044 */
8045HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8046{
8047 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8048
8049 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8050 AssertRCReturn(rc, rc);
8051
8052 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8053 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8054 rc = VINF_SUCCESS;
8055 else
8056 rc = VINF_EM_HALT;
8057
8058 if (rc != VINF_SUCCESS)
8059 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8060 return rc;
8061}
8062
8063
8064#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8065/**
8066 * VM-exit handler for instructions that result in a \#UD exception delivered to
8067 * the guest.
8068 */
8069HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8070{
8071 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8072 vmxHCSetPendingXcptUD(pVCpu);
8073 return VINF_SUCCESS;
8074}
8075#endif
8076
8077
8078/**
8079 * VM-exit handler for expiry of the VMX-preemption timer.
8080 */
8081HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8082{
8083 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8084
8085 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8086 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8087    Log12(("vmxHCExitPreemptTimer:\n"));
8088
8089 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8090 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8091 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8092 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8093 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8094}
8095
8096
8097/**
8098 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8099 */
8100HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8101{
8102 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8103
8104 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8105 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8106 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8107 AssertRCReturn(rc, rc);
8108
8109 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8110 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8111 : HM_CHANGED_RAISED_XCPT_MASK);
8112
8113#ifndef IN_NEM_DARWIN
8114 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
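    /* The guest may have changed XCR0 with this XSETBV, so re-evaluate whether XCR0 must be
       swapped on VM-entry/exit and refresh the VM-entry worker function if that changed. */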
8115 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8116 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8117 {
8118 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8119 hmR0VmxUpdateStartVmFunction(pVCpu);
8120 }
8121#endif
8122
8123 return rcStrict;
8124}
8125
8126
8127/**
8128 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8129 */
8130HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8131{
8132 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8133
8134    /** @todo Enable the new code after finding a reliable guest test-case. */
8135#if 1
8136 return VERR_EM_INTERPRETER;
8137#else
8138 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8139 | HMVMX_READ_EXIT_INSTR_INFO
8140 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8141 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8142 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8143 AssertRCReturn(rc, rc);
8144
8145 /* Paranoia. Ensure this has a memory operand. */
8146 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8147
8148 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8149 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8150 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8151 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8152
8153 RTGCPTR GCPtrDesc;
8154 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8155
8156 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8157 GCPtrDesc, uType);
8158 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8159 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8160 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8161 {
8162 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8163 rcStrict = VINF_SUCCESS;
8164 }
8165 return rcStrict;
8166#endif
8167}
8168
8169
8170/**
8171 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8172 * VM-exit.
8173 */
8174HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8175{
8176 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8177 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8178 AssertRCReturn(rc, rc);
8179
8180 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8181 if (RT_FAILURE(rc))
8182 return rc;
8183
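    /* Re-run the guest-state checks; the returned value identifies the first check that
       failed and is only used for the logging below. */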
8184 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8185 NOREF(uInvalidReason);
8186
8187#ifdef VBOX_STRICT
8188 uint32_t fIntrState;
8189 uint64_t u64Val;
8190 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8191 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8192 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8193
8194 Log4(("uInvalidReason %u\n", uInvalidReason));
8195 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8196 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8197 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8198
8199 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8200 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8201 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8202 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8203 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8204 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8205 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8206    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8207 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8208 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8209 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8210 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8211# ifndef IN_NEM_DARWIN
8212 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8213 {
8214 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8215 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8216 }
8217
8218 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8219# endif
8220#endif
8221
8222 return VERR_VMX_INVALID_GUEST_STATE;
8223}
8224
8225/**
8226 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8227 */
8228HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8229{
8230 /*
8231 * Cumulative notes of all recognized but unexpected VM-exits.
8232 *
8233 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8234 * nested-paging is used.
8235 *
8236     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8237 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8238 * this function (and thereby stop VM execution) for handling such instructions.
8239 *
8240 *
8241 * VMX_EXIT_INIT_SIGNAL:
8242 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8243     *    They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
8244     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8245     *
8246     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8247     *    See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8248     *    See Intel spec. 23.8 "Restrictions on VMX Operation".
8249 *
8250 * VMX_EXIT_SIPI:
8251 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8252 * activity state is used. We don't make use of it as our guests don't have direct
8253 * access to the host local APIC.
8254 *
8255 * See Intel spec. 25.3 "Other Causes of VM-exits".
8256 *
8257 * VMX_EXIT_IO_SMI:
8258 * VMX_EXIT_SMI:
8259 * This can only happen if we support dual-monitor treatment of SMI, which can be
8260 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8261 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8262 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8263 *
8264 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8265 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8266 *
8267 * VMX_EXIT_ERR_MSR_LOAD:
8268     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8269     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8270     *    execution.
8271 *
8272 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8273 *
8274 * VMX_EXIT_ERR_MACHINE_CHECK:
8275     *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8276     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8277     *    abort-class #MC exception is raised. We thus cannot assume a
8278     *    reasonable chance of continuing any sort of execution and we bail.
8279 *
8280 * See Intel spec. 15.1 "Machine-check Architecture".
8281 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8282 *
8283 * VMX_EXIT_PML_FULL:
8284 * VMX_EXIT_VIRTUALIZED_EOI:
8285 * VMX_EXIT_APIC_WRITE:
8286 * We do not currently support any of these features and thus they are all unexpected
8287 * VM-exits.
8288 *
8289 * VMX_EXIT_GDTR_IDTR_ACCESS:
8290 * VMX_EXIT_LDTR_TR_ACCESS:
8291 * VMX_EXIT_RDRAND:
8292 * VMX_EXIT_RSM:
8293 * VMX_EXIT_VMFUNC:
8294 * VMX_EXIT_ENCLS:
8295 * VMX_EXIT_RDSEED:
8296 * VMX_EXIT_XSAVES:
8297 * VMX_EXIT_XRSTORS:
8298 * VMX_EXIT_UMWAIT:
8299 * VMX_EXIT_TPAUSE:
8300 * VMX_EXIT_LOADIWKEY:
8301 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8302     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8303 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8304 *
8305 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8306 */
8307 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8308 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8309 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8310}
8311
8312
8313/**
8314 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8315 */
8316HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8317{
8318 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8319
8320 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8321
8322    /** @todo Optimize this: We currently drag in the whole MSR state
8323     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
8324     * MSRs required. That would require changes to IEM and possibly CPUM too.
8325     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8326 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8327 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8328 int rc;
8329 switch (idMsr)
8330 {
8331 default:
8332 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8333 __FUNCTION__);
8334 AssertRCReturn(rc, rc);
8335 break;
8336 case MSR_K8_FS_BASE:
8337 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8338 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8339 AssertRCReturn(rc, rc);
8340 break;
8341 case MSR_K8_GS_BASE:
8342 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8343 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8344 AssertRCReturn(rc, rc);
8345 break;
8346 }
8347
8348 Log4Func(("ecx=%#RX32\n", idMsr));
8349
8350#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
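    /* Sanity: with MSR bitmaps active, an RDMSR exit for an auto-load/store MSR (other than
       EFER) or for a lazily loaded MSR with read pass-through would indicate misconfigured
       bitmaps, which the checks below catch. */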
8351 Assert(!pVmxTransient->fIsNestedGuest);
8352 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8353 {
8354 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8355 && idMsr != MSR_K6_EFER)
8356 {
8357 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8358 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8359 }
8360 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8361 {
8362 Assert(pVmcsInfo->pvMsrBitmap);
8363 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8364 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8365 {
8366 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8367 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8368 }
8369 }
8370 }
8371#endif
8372
8373 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8374 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8375 if (rcStrict == VINF_SUCCESS)
8376 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8377 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8378 {
8379 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8380 rcStrict = VINF_SUCCESS;
8381 }
8382 else
8383 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8384 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8385
8386 return rcStrict;
8387}
8388
8389
8390/**
8391 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8392 */
8393HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8394{
8395 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8396
8397 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8398
8399 /*
8400     * The FS and GS base MSRs are not covered by the all-MSRs mask (CPUMCTX_EXTRN_ALL_MSRS) used below.
8401     * Although we don't need to fetch the base itself (it will be overwritten shortly),
8402     * loading the guest state also loads the entire segment register, including the limit
8403     * and attributes, and thus we need to import them here.
8404 */
8405    /** @todo Optimize this: We currently drag in the whole MSR state
8406     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
8407     * MSRs required. That would require changes to IEM and possibly CPUM too.
8408     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8409 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8410 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8411 int rc;
8412 switch (idMsr)
8413 {
8414 default:
8415 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8416 __FUNCTION__);
8417 AssertRCReturn(rc, rc);
8418 break;
8419
8420 case MSR_K8_FS_BASE:
8421 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8422 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8423 AssertRCReturn(rc, rc);
8424 break;
8425 case MSR_K8_GS_BASE:
8426 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8427 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8428 AssertRCReturn(rc, rc);
8429 break;
8430 }
8431 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8432
8433 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8434 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8435
8436 if (rcStrict == VINF_SUCCESS)
8437 {
8438 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8439
8440 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8441 if ( idMsr == MSR_IA32_APICBASE
8442 || ( idMsr >= MSR_IA32_X2APIC_START
8443 && idMsr <= MSR_IA32_X2APIC_END))
8444 {
8445 /*
8446 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8447 * When full APIC register virtualization is implemented we'll have to make
8448 * sure APIC state is saved from the VMCS before IEM changes it.
8449 */
8450 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8451 }
8452 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8453 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8454 else if (idMsr == MSR_K6_EFER)
8455 {
8456 /*
8457 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8458 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8459 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8460 */
8461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8462 }
8463
8464 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8465 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8466 {
8467 switch (idMsr)
8468 {
8469 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8470 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8471 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8472 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8473 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8474 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8475 default:
8476 {
8477#ifndef IN_NEM_DARWIN
8478 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8479 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8480 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8481 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8482#else
8483 AssertMsgFailed(("TODO\n"));
8484#endif
8485 break;
8486 }
8487 }
8488 }
8489#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8490 else
8491 {
8492             /* Paranoia. Validate that MSRs given write pass-through in the MSR bitmaps are not being intercepted. */
8493 switch (idMsr)
8494 {
8495 case MSR_IA32_SYSENTER_CS:
8496 case MSR_IA32_SYSENTER_EIP:
8497 case MSR_IA32_SYSENTER_ESP:
8498 case MSR_K8_FS_BASE:
8499 case MSR_K8_GS_BASE:
8500 {
8501 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8502 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8503 }
8504
8505                 /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR bitmaps are used. */
8506 default:
8507 {
8508 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8509 {
8510 /* EFER MSR writes are always intercepted. */
8511 if (idMsr != MSR_K6_EFER)
8512 {
8513 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8514 idMsr));
8515 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8516 }
8517 }
8518
8519 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8520 {
8521 Assert(pVmcsInfo->pvMsrBitmap);
8522 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8523 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8524 {
8525 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8526 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8527 }
8528 }
8529 break;
8530 }
8531 }
8532 }
8533#endif /* VBOX_STRICT */
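        /*
         * Illustrative sketch only (compiled out): the MSR-bitmap handling above boils down to
         * querying the bitmap for write pass-through permission. The same query works for any
         * MSR; MSR_IA32_TSC below is merely an arbitrary example and is not consulted by the
         * surrounding code.
         */
#if 0
        if (pVmcsInfo->pvMsrBitmap)
        {
            uint32_t const fMsrpmEx    = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_IA32_TSC);
            bool     const fWrPassthru = RT_BOOL(fMsrpmEx & VMXMSRPM_ALLOW_WR); /* true => WRMSR to it does not VM-exit. */
            NOREF(fWrPassthru);
        }
#endif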
8534 }
8535 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8536 {
8537 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8538 rcStrict = VINF_SUCCESS;
8539 }
8540 else
8541 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8542 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8543
8544 return rcStrict;
8545}
8546
8547
8548/**
8549 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8550 */
8551HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8552{
8553 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8554
8555     /** @todo The guest has likely hit a contended spinlock. We might want to
8556      *        poke or schedule a different guest VCPU. */
8557 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8558 if (RT_SUCCESS(rc))
8559 return VINF_EM_RAW_INTERRUPT;
8560
8561 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8562 return rc;
8563}
8564
8565
8566/**
8567 * VM-exit handler for when the TPR value is lowered below the specified
8568 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8569 */
8570HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8571{
8572 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8573 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8574
8575 /*
8576 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8577 * We'll re-evaluate pending interrupts and inject them before the next VM
8578 * entry so we can just continue execution here.
8579 */
8580 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8581 return VINF_SUCCESS;
8582}
8583
8584
8585/**
8586 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8587 * VM-exit.
8588 *
8589 * @retval VINF_SUCCESS when guest execution can continue.
8590 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8591 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8592 * incompatible guest state for VMX execution (real-on-v86 case).
8593 */
8594HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8595{
8596 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8597 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8598
8599 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8600 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8601 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8602
8603 VBOXSTRICTRC rcStrict;
8604 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8605 uint64_t const uExitQual = pVmxTransient->uExitQual;
8606 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
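    /*
     * Informational: the VMX_EXIT_QUAL_CRX_XXX macros used here and below decode the exit
     * qualification for control-register accesses, i.e. which CR was touched, the access type
     * (MOV-to-CR, MOV-from-CR, CLTS or LMSW), the general-purpose register involved and, for
     * LMSW, the source operand data. See the Intel spec. on control-register access exit
     * qualifications for the exact bit layout.
     */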
8607 switch (uAccessType)
8608 {
8609 /*
8610 * MOV to CRx.
8611 */
8612 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8613 {
8614 /*
8615 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8616 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8617 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8618 * PAE PDPTEs as well.
8619 */
8620 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8621 AssertRCReturn(rc, rc);
8622
8623 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8624#ifndef IN_NEM_DARWIN
8625 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8626#endif
8627 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8628 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8629
8630 /*
8631              * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8632 * - When nested paging isn't used.
8633 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8634 * - We are executing in the VM debug loop.
8635 */
8636#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8637# ifndef IN_NEM_DARWIN
8638 Assert( iCrReg != 3
8639 || !VM_IS_VMX_NESTED_PAGING(pVM)
8640 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8641 || pVCpu->hmr0.s.fUsingDebugLoop);
8642# else
8643 Assert( iCrReg != 3
8644 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8645# endif
8646#endif
8647
8648 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8649 Assert( iCrReg != 8
8650 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8651
8652 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8653 AssertMsg( rcStrict == VINF_SUCCESS
8654 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8655
8656#ifndef IN_NEM_DARWIN
8657 /*
8658 * This is a kludge for handling switches back to real mode when we try to use
8659 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8660 * deal with special selector values, so we have to return to ring-3 and run
8661 * there till the selector values are V86 mode compatible.
8662 *
8663 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8664 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8665 * this function.
8666 */
8667 if ( iCrReg == 0
8668 && rcStrict == VINF_SUCCESS
8669 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8670 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8671 && (uOldCr0 & X86_CR0_PE)
8672 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8673 {
8674 /** @todo Check selectors rather than returning all the time. */
8675 Assert(!pVmxTransient->fIsNestedGuest);
8676 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8677 rcStrict = VINF_EM_RESCHEDULE_REM;
8678 }
8679#endif
8680
8681 break;
8682 }
8683
8684 /*
8685 * MOV from CRx.
8686 */
8687 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8688 {
8689 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8690 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8691
8692 /*
8693              * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8694 * - When nested paging isn't used.
8695 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8696 * - We are executing in the VM debug loop.
8697 */
8698#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8699# ifndef IN_NEM_DARWIN
8700 Assert( iCrReg != 3
8701 || !VM_IS_VMX_NESTED_PAGING(pVM)
8702 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8703 || pVCpu->hmr0.s.fLeaveDone);
8704# else
8705 Assert( iCrReg != 3
8706 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8707# endif
8708#endif
8709
8710 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8711 Assert( iCrReg != 8
8712 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8713
8714 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8715 break;
8716 }
8717
8718 /*
8719 * CLTS (Clear Task-Switch Flag in CR0).
8720 */
8721 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8722 {
8723 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8724 break;
8725 }
8726
8727 /*
8728 * LMSW (Load Machine-Status Word into CR0).
8729 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8730 */
8731 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8732 {
8733 RTGCPTR GCPtrEffDst;
8734 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8735 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8736 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8737 if (fMemOperand)
8738 {
8739 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8740 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8741 }
8742 else
8743 GCPtrEffDst = NIL_RTGCPTR;
8744 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8745 break;
8746 }
8747
8748 default:
8749 {
8750 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8751 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8752 }
8753 }
8754
8755 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8756 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8757 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8758
8759 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8760 NOREF(pVM);
8761 return rcStrict;
8762}
8763
8764
8765/**
8766 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8767 * VM-exit.
8768 */
8769HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8770{
8771 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8772 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8773
8774 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8775 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8776 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8777 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8778#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8779 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8780 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8781 AssertRCReturn(rc, rc);
8782
8783     /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8784 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8785 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8786 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8787 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8788 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8789 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8790 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
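    /*
     * Illustrative sketch only (compiled out): a manual decode equivalent to the
     * VMX_EXIT_QUAL_IO_XXX macros used above, assuming the usual I/O exit-qualification
     * layout (access size in bits 2:0, direction in bit 3, string indicator in bit 4,
     * port number in bits 31:16).
     */
#if 0
    uint16_t const uIOPortManual   = (uint16_t)((pVmxTransient->uExitQual >> 16) & 0xffff);
    uint8_t  const uIOSizeManual   = (uint8_t)(pVmxTransient->uExitQual & 0x7);
    bool     const fIOWriteManual  = !(pVmxTransient->uExitQual & RT_BIT_64(3)); /* Bit 3 set => IN, clear => OUT. */
    bool     const fIOStringManual = RT_BOOL(pVmxTransient->uExitQual & RT_BIT_64(4));
    Assert(uIOPortManual == uIOPort && uIOSizeManual == uIOSize && fIOWriteManual == fIOWrite && fIOStringManual == fIOString);
#endif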
8791
8792 /*
8793 * Update exit history to see if this exit can be optimized.
8794 */
8795 VBOXSTRICTRC rcStrict;
8796 PCEMEXITREC pExitRec = NULL;
8797 if ( !fGstStepping
8798 && !fDbgStepping)
8799 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8800 !fIOString
8801 ? !fIOWrite
8802 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8803 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8804 : !fIOWrite
8805 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8806 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8807 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8808 if (!pExitRec)
8809 {
8810 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8811 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
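        /* Worked example: a 16-bit IN (uIOSize=1) maps to cbValue=2 and uAndVal=0xffff, so the
           IN path below merges the port data into AX and leaves the upper half of EAX untouched. */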
8812
8813 uint32_t const cbValue = s_aIOSizes[uIOSize];
8814 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8815 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8816 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8817 if (fIOString)
8818 {
8819 /*
8820 * INS/OUTS - I/O String instruction.
8821 *
8822 * Use instruction-information if available, otherwise fall back on
8823 * interpreting the instruction.
8824 */
8825 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8826 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
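            /* The IA32_VMX_BASIC capability MSR advertises whether the CPU supplies the VM-exit
               instruction-information field for INS/OUTS; only then can we avoid re-decoding the
               instruction via IEMExecOne() below. */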
8827 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8828 if (fInsOutsInfo)
8829 {
8830 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8831 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8832 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8833 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8834 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8835 if (fIOWrite)
8836 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8837 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8838 else
8839 {
8840 /*
8841 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8842                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8843 * See Intel Instruction spec. for "INS".
8844 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8845 */
8846 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8847 }
8848 }
8849 else
8850 rcStrict = IEMExecOne(pVCpu);
8851
8852 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8853 fUpdateRipAlready = true;
8854 }
8855 else
8856 {
8857 /*
8858 * IN/OUT - I/O instruction.
8859 */
8860 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8861 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8862 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8863 if (fIOWrite)
8864 {
8865 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8866 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8867#ifndef IN_NEM_DARWIN
8868 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8869 && !pCtx->eflags.Bits.u1TF)
8870 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8871#endif
8872 }
8873 else
8874 {
8875 uint32_t u32Result = 0;
8876 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8877 if (IOM_SUCCESS(rcStrict))
8878 {
8879 /* Save result of I/O IN instr. in AL/AX/EAX. */
8880 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8881 }
8882#ifndef IN_NEM_DARWIN
8883 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8884 && !pCtx->eflags.Bits.u1TF)
8885 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8886#endif
8887 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8888 }
8889 }
8890
8891 if (IOM_SUCCESS(rcStrict))
8892 {
8893 if (!fUpdateRipAlready)
8894 {
8895 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8896 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8897 }
8898
8899 /*
8900              * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8901              * guru meditation while booting a Fedora 17 64-bit guest.
8902 *
8903 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8904 */
8905 if (fIOString)
8906 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8907
8908 /*
8909 * If any I/O breakpoints are armed, we need to check if one triggered
8910 * and take appropriate action.
8911 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8912 */
8913#if 1
8914 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8915#else
8916 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8917 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8918 AssertRCReturn(rc, rc);
8919#endif
8920
8921 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8922 * execution engines about whether hyper BPs and such are pending. */
8923 uint32_t const uDr7 = pCtx->dr[7];
8924 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8925 && X86_DR7_ANY_RW_IO(uDr7)
8926 && (pCtx->cr4 & X86_CR4_DE))
8927 || DBGFBpIsHwIoArmed(pVM)))
8928 {
8929 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8930
8931#ifndef IN_NEM_DARWIN
8932 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8933 VMMRZCallRing3Disable(pVCpu);
8934 HM_DISABLE_PREEMPT(pVCpu);
8935
8936 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8937
8938 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8939 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8940 {
8941 /* Raise #DB. */
8942 if (fIsGuestDbgActive)
8943 ASMSetDR6(pCtx->dr[6]);
8944 if (pCtx->dr[7] != uDr7)
8945 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8946
8947 vmxHCSetPendingXcptDB(pVCpu);
8948 }
8949 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8950 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8951 else if ( rcStrict2 != VINF_SUCCESS
8952 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8953 rcStrict = rcStrict2;
8954 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8955
8956 HM_RESTORE_PREEMPT();
8957 VMMRZCallRing3Enable(pVCpu);
8958#else
8959 /** @todo */
8960#endif
8961 }
8962 }
8963
8964#ifdef VBOX_STRICT
8965 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8966 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8967 Assert(!fIOWrite);
8968 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8969 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8970 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8971 Assert(fIOWrite);
8972 else
8973 {
8974# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8975 * statuses, that the VMM device and some others may return. See
8976 * IOM_SUCCESS() for guidance. */
8977 AssertMsg( RT_FAILURE(rcStrict)
8978 || rcStrict == VINF_SUCCESS
8979 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8980 || rcStrict == VINF_EM_DBG_BREAKPOINT
8981 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8982 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8983# endif
8984 }
8985#endif
8986 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8987 }
8988 else
8989 {
8990 /*
8991 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8992 */
8993 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8994 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8995 AssertRCReturn(rc2, rc2);
8996 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8997 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8998 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8999 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9000 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9001 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9002
9003 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9004 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9005
9006 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9007 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9008 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9009 }
9010 return rcStrict;
9011}
9012
9013
9014/**
9015 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9016 * VM-exit.
9017 */
9018HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9019{
9020 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9021
9022     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9023 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9024 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9025 {
9026 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9027 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9028 {
9029 uint32_t uErrCode;
9030 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9031 {
9032 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9033 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9034 }
9035 else
9036 uErrCode = 0;
9037
9038 RTGCUINTPTR GCPtrFaultAddress;
9039 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9040 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9041 else
9042 GCPtrFaultAddress = 0;
9043
9044 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9045
9046 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9047 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9048
9049 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9050 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9051 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9052 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9053 }
9054 }
9055
9056 /* Fall back to the interpreter to emulate the task-switch. */
9057 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9058 return VERR_EM_INTERPRETER;
9059}
9060
9061
9062/**
9063 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9064 */
9065HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9066{
9067 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9068
9069 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9070 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9071 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9072 AssertRC(rc);
9073 return VINF_EM_DBG_STEPPED;
9074}
9075
9076
9077/**
9078 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9079 */
9080HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9081{
9082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9083 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9084
9085 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9086 | HMVMX_READ_EXIT_INSTR_LEN
9087 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9088 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9089 | HMVMX_READ_IDT_VECTORING_INFO
9090 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9091
9092 /*
9093 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9094 */
9095 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9096 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9097 {
9098         /* For some crazy guests, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9099 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9100 {
9101 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9102 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9103 }
9104 }
9105 else
9106 {
9107 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9108 return rcStrict;
9109 }
9110
9111     /* IOMR0MmioPhysHandler() below may call into IEM, save the necessary state. */
9112 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9113 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9114 AssertRCReturn(rc, rc);
9115
9116     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9117 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9118 switch (uAccessType)
9119 {
9120#ifndef IN_NEM_DARWIN
9121 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9122 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9123 {
9124 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9125 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9126 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9127
9128 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9129 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9130 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
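            /* E.g. with the default APIC base of 0xfee00000 and an access at the TPR offset (0x80),
               this yields GCPhys = 0xfee00080 for the MMIO handler below. */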
9131 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9132 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9133
9134 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9135 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9136 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9137 if ( rcStrict == VINF_SUCCESS
9138 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9139 || rcStrict == VERR_PAGE_NOT_PRESENT)
9140 {
9141 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9142 | HM_CHANGED_GUEST_APIC_TPR);
9143 rcStrict = VINF_SUCCESS;
9144 }
9145 break;
9146 }
9147#else
9148 /** @todo */
9149#endif
9150
9151 default:
9152 {
9153 Log4Func(("uAccessType=%#x\n", uAccessType));
9154 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9155 break;
9156 }
9157 }
9158
9159 if (rcStrict != VINF_SUCCESS)
9160 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9161 return rcStrict;
9162}
9163
9164
9165/**
9166 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9167 * VM-exit.
9168 */
9169HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9170{
9171 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9172 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9173
9174 /*
9175 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9176 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9177 * must emulate the MOV DRx access.
9178 */
9179 if (!pVmxTransient->fIsNestedGuest)
9180 {
9181 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9182 if ( pVmxTransient->fWasGuestDebugStateActive
9183#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9184 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9185#endif
9186 )
9187 {
9188 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9189 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9190 }
9191
9192 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9193 && !pVmxTransient->fWasHyperDebugStateActive)
9194 {
9195 Assert(!DBGFIsStepping(pVCpu));
9196 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9197
9198 /* Whether we disable intercepting MOV DRx instructions and resume
9199 the current one, or emulate it and keep intercepting them is
9200 configurable. Though it usually comes down to whether there are
9201 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9202#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9203 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9204#else
9205 bool const fResumeInstruction = true;
9206#endif
9207 if (fResumeInstruction)
9208 {
9209 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9210 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9211 AssertRC(rc);
9212 }
9213
9214#ifndef IN_NEM_DARWIN
9215 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9216 VMMRZCallRing3Disable(pVCpu);
9217 HM_DISABLE_PREEMPT(pVCpu);
9218
9219 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9220 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9221 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9222
9223 HM_RESTORE_PREEMPT();
9224 VMMRZCallRing3Enable(pVCpu);
9225#else
9226 CPUMR3NemActivateGuestDebugState(pVCpu);
9227 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9228 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9229#endif
9230
9231 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9232 if (fResumeInstruction)
9233 {
9234#ifdef VBOX_WITH_STATISTICS
9235 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9236 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9237 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9238 else
9239 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9240#endif
9241 return VINF_SUCCESS;
9242 }
9243 }
9244 }
9245
9246 /*
9247 * Import state. We must have DR7 loaded here as it's always consulted,
9248 * both for reading and writing. The other debug registers are never
9249 * exported as such.
9250 */
9251 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9252 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9253 | CPUMCTX_EXTRN_GPRS_MASK
9254 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9255 AssertRCReturn(rc, rc);
9256
9257 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9258 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
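    /* The exit qualification for MOV DRx encodes the debug register number, the access direction
       and the general-purpose register involved; the VMX_EXIT_QUAL_DRX_XXX macros above and below
       extract these fields. */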
9259 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9260 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9261
9262 VBOXSTRICTRC rcStrict;
9263 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9264 {
9265 /*
9266 * Write DRx register.
9267 */
9268 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9269 AssertMsg( rcStrict == VINF_SUCCESS
9270 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9271
9272 if (rcStrict == VINF_SUCCESS)
9273 {
9274 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9275 * kept it for now to avoid breaking something non-obvious. */
9276 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9277 | HM_CHANGED_GUEST_DR7);
9278 /* Update the DR6 register if guest debug state is active, otherwise we'll
9279 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9280 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9281 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9282 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9283 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9284 }
9285 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9286 {
9287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9288 rcStrict = VINF_SUCCESS;
9289 }
9290
9291 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9292 }
9293 else
9294 {
9295 /*
9296 * Read DRx register into a general purpose register.
9297 */
9298 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9299 AssertMsg( rcStrict == VINF_SUCCESS
9300 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9301
9302 if (rcStrict == VINF_SUCCESS)
9303 {
9304 if (iGReg == X86_GREG_xSP)
9305 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9306 | HM_CHANGED_GUEST_RSP);
9307 else
9308 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9309 }
9310 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9311 {
9312 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9313 rcStrict = VINF_SUCCESS;
9314 }
9315
9316 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9317 }
9318
9319 return rcStrict;
9320}
9321
9322
9323/**
9324 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9325 * Conditional VM-exit.
9326 */
9327HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9328{
9329 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9330
9331#ifndef IN_NEM_DARWIN
9332 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9333
9334 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9335 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9336 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9337 | HMVMX_READ_IDT_VECTORING_INFO
9338 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9339 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9340
9341 /*
9342 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9343 */
9344 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9345 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9346 {
9347 /*
9348 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9349 * instruction emulation to inject the original event. Otherwise, injecting the original event
9350 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9351 */
9352 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9353 { /* likely */ }
9354 else
9355 {
9356 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9357# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9358 /** @todo NSTVMX: Think about how this should be handled. */
9359 if (pVmxTransient->fIsNestedGuest)
9360 return VERR_VMX_IPE_3;
9361# endif
9362 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9363 }
9364 }
9365 else
9366 {
9367 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9368 return rcStrict;
9369 }
9370
9371 /*
9372 * Get sufficient state and update the exit history entry.
9373 */
9374 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9375 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9376 AssertRCReturn(rc, rc);
9377
9378 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9379 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9380 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9381 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9382 if (!pExitRec)
9383 {
9384 /*
9385 * If we succeed, resume guest execution.
9386 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9387 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9388 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9389 * weird case. See @bugref{6043}.
9390 */
9391 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9392/** @todo bird: We can probably just go straight to IOM here and assume that
9393 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9394 * well. However, we need to address that aliasing workarounds that
9395 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9396 *
9397 * Might also be interesting to see if we can get this done more or
9398 * less locklessly inside IOM. Need to consider the lookup table
9399 * updating and use a bit more carefully first (or do all updates via
9400 * rendezvous) */
9401 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9402 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9403 if ( rcStrict == VINF_SUCCESS
9404 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9405 || rcStrict == VERR_PAGE_NOT_PRESENT)
9406 {
9407 /* Successfully handled MMIO operation. */
9408 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9409 | HM_CHANGED_GUEST_APIC_TPR);
9410 rcStrict = VINF_SUCCESS;
9411 }
9412 }
9413 else
9414 {
9415 /*
9416 * Frequent exit or something needing probing. Call EMHistoryExec.
9417 */
9418 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9419 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9420
9421 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9422 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9423
9424 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9425 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9426 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9427 }
9428 return rcStrict;
9429#else
9430 AssertFailed();
9431 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9432#endif
9433}
9434
9435
9436/**
9437 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9438 * VM-exit.
9439 */
9440HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9441{
9442 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9443#ifndef IN_NEM_DARWIN
9444 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9445
9446 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9447 | HMVMX_READ_EXIT_INSTR_LEN
9448 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9449 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9450 | HMVMX_READ_IDT_VECTORING_INFO
9451 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9452 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9453
9454 /*
9455 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9456 */
9457 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9458 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9459 {
9460 /*
9461 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9462 * we shall resolve the nested #PF and re-inject the original event.
9463 */
9464 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9465 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9466 }
9467 else
9468 {
9469 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9470 return rcStrict;
9471 }
9472
9473 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9474 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9475 AssertRCReturn(rc, rc);
9476
9477 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9478 uint64_t const uExitQual = pVmxTransient->uExitQual;
9479 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9480
9481 RTGCUINT uErrorCode = 0;
9482 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9483 uErrorCode |= X86_TRAP_PF_ID;
9484 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9485 uErrorCode |= X86_TRAP_PF_RW;
9486 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9487 uErrorCode |= X86_TRAP_PF_P;
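    /* E.g. a guest write hitting a not-present EPT entry sets only X86_TRAP_PF_RW above (with
       X86_TRAP_PF_P clear), mirroring the error code of an ordinary guest #PF. */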
9488
9489 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9490 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9491
9492 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9493
9494 /*
9495 * Handle the pagefault trap for the nested shadow table.
9496 */
9497 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9498 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9499 TRPMResetTrap(pVCpu);
9500
9501 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9502 if ( rcStrict == VINF_SUCCESS
9503 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9504 || rcStrict == VERR_PAGE_NOT_PRESENT)
9505 {
9506 /* Successfully synced our nested page tables. */
9507 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9508 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9509 return VINF_SUCCESS;
9510 }
9511 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9512 return rcStrict;
9513
9514#else /* IN_NEM_DARWIN */
9515 PVM pVM = pVCpu->CTX_SUFF(pVM);
9516 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9517 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9518 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9519 vmxHCImportGuestRip(pVCpu);
9520 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9521
9522 /*
9523 * Ask PGM for information about the given GCPhys. We need to check if we're
9524 * out of sync first.
9525 */
9526 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9527 false,
9528 false };
9529 PGMPHYSNEMPAGEINFO Info;
9530 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9531 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9532 if (RT_SUCCESS(rc))
9533 {
9534 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9535 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9536 {
9537 if (State.fCanResume)
9538 {
9539 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9540 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9541 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9542 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9543 State.fDidSomething ? "" : " no-change"));
9544 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9545 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9546 return VINF_SUCCESS;
9547 }
9548 }
9549
9550 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9551 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9552 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9553 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9554 State.fDidSomething ? "" : " no-change"));
9555 }
9556 else
9557 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9558 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9559 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9560
9561 /*
9562 * Emulate the memory access, either access handler or special memory.
9563 */
9564 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9565 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9566 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9567 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9568 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9569
9570 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9571 AssertRCReturn(rc, rc);
9572
9573 VBOXSTRICTRC rcStrict;
9574 if (!pExitRec)
9575 rcStrict = IEMExecOne(pVCpu);
9576 else
9577 {
9578 /* Frequent access or probing. */
9579 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9580 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9581 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9582 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9583 }
9584
9585 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9586
9587 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9588 return rcStrict;
9589#endif /* IN_NEM_DARWIN */
9590}
9591
9592#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9593
9594/**
9595 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9596 */
9597HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9598{
9599 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9600
9601 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9602 | HMVMX_READ_EXIT_INSTR_INFO
9603 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9604 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9605 | CPUMCTX_EXTRN_SREG_MASK
9606 | CPUMCTX_EXTRN_HWVIRT
9607 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9608 AssertRCReturn(rc, rc);
9609
9610 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9611
9612 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9613 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9614
9615 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9616 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9617 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9618 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9619 {
9620 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9621 rcStrict = VINF_SUCCESS;
9622 }
9623 return rcStrict;
9624}
9625
9626
9627/**
9628 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9629 */
9630HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9631{
9632 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9633
9634 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9635 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9636 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9637 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9638 AssertRCReturn(rc, rc);
9639
9640 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9641
9642 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9643 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9644 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9645 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9646 {
9647 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9648 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9649 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9650 }
9651 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9652 return rcStrict;
9653}
9654
9655
9656/**
9657 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9658 */
9659HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9660{
9661 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9662
9663 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9664 | HMVMX_READ_EXIT_INSTR_INFO
9665 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9666 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9667 | CPUMCTX_EXTRN_SREG_MASK
9668 | CPUMCTX_EXTRN_HWVIRT
9669 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9670 AssertRCReturn(rc, rc);
9671
9672 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9673
9674 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9675 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9676
9677 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9678 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9679 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9680 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9681 {
9682 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9683 rcStrict = VINF_SUCCESS;
9684 }
9685 return rcStrict;
9686}
9687
9688
9689/**
9690 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9691 */
9692HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9693{
9694 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9695
9696 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9697 | HMVMX_READ_EXIT_INSTR_INFO
9698 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9699 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9700 | CPUMCTX_EXTRN_SREG_MASK
9701 | CPUMCTX_EXTRN_HWVIRT
9702 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9703 AssertRCReturn(rc, rc);
9704
9705 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9706
9707 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9708 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9709
9710 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9711 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9712 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9713 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9714 {
9715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9716 rcStrict = VINF_SUCCESS;
9717 }
9718 return rcStrict;
9719}
9720
9721
9722/**
9723 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9724 */
9725HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9726{
9727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9728
9729 /*
9730      * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9731      * thus might not need to import the shadow VMCS state, but it's safer to do so just
9732      * in case code elsewhere dares to look at unsynced VMCS fields.
9733 */
9734 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9735 | HMVMX_READ_EXIT_INSTR_INFO
9736 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9737 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9738 | CPUMCTX_EXTRN_SREG_MASK
9739 | CPUMCTX_EXTRN_HWVIRT
9740 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9741 AssertRCReturn(rc, rc);
9742
9743 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9744
9745 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9746 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9747 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9748
9749 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9750 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9751 {
9752 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9753
9754# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9755 /* Try for exit optimization. This is on the following instruction
9756 because it would be a waste of time to have to reinterpret the
9757        already decoded vmread instruction. */
9758 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9759 if (pExitRec)
9760 {
9761 /* Frequent access or probing. */
9762 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9763 AssertRCReturn(rc, rc);
9764
9765 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9766 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9767 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9768 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9769 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9770 }
9771# endif
9772 }
9773 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9774 {
9775 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9776 rcStrict = VINF_SUCCESS;
9777 }
9778 return rcStrict;
9779}
9780
9781
9782/**
9783 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9784 */
9785HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9786{
9787 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9788
9789 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9790 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9791 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9792 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9793 AssertRCReturn(rc, rc);
9794
9795 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9796
9797 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9798 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9799 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9801 {
9802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9803 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9804 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9805 }
9806 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9807 return rcStrict;
9808}
9809
9810
9811/**
9812 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9813 */
9814HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9815{
9816 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9817
9818 /*
9819 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9820 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9821 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9822 */
9823 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9824 | HMVMX_READ_EXIT_INSTR_INFO
9825 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9826 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9827 | CPUMCTX_EXTRN_SREG_MASK
9828 | CPUMCTX_EXTRN_HWVIRT
9829 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9830 AssertRCReturn(rc, rc);
9831
9832 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9833
9834 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9835 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9836 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9837
9838 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9839 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9841 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9842 {
9843 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9844 rcStrict = VINF_SUCCESS;
9845 }
9846 return rcStrict;
9847}
9848
9849
9850/**
9851 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9852 */
9853HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9854{
9855 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9856
9857 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9858 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9859 | CPUMCTX_EXTRN_HWVIRT
9860 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9861 AssertRCReturn(rc, rc);
9862
9863 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9864
9865 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9866 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9868 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9869 {
9870 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9871 rcStrict = VINF_SUCCESS;
9872 }
9873 return rcStrict;
9874}
9875
9876
9877/**
9878 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9879 */
9880HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9881{
9882 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9883
9884 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9885 | HMVMX_READ_EXIT_INSTR_INFO
9886 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9887 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9888 | CPUMCTX_EXTRN_SREG_MASK
9889 | CPUMCTX_EXTRN_HWVIRT
9890 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9891 AssertRCReturn(rc, rc);
9892
9893 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9894
9895 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9896 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9897
9898 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9899 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9900 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9901 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9902 {
9903 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9904 rcStrict = VINF_SUCCESS;
9905 }
9906 return rcStrict;
9907}
9908
9909
9910/**
9911 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9912 */
9913HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9914{
9915 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9916
9917 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9918 | HMVMX_READ_EXIT_INSTR_INFO
9919 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9920 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9921 | CPUMCTX_EXTRN_SREG_MASK
9922 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9923 AssertRCReturn(rc, rc);
9924
9925 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9926
9927 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9928 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9929
9930 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9931 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9932 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9933 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9934 {
9935 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9936 rcStrict = VINF_SUCCESS;
9937 }
9938 return rcStrict;
9939}
9940
9941
9942# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9943/**
9944 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9945 */
9946HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9947{
9948 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9949
9950 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9951 | HMVMX_READ_EXIT_INSTR_INFO
9952 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9953 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9954 | CPUMCTX_EXTRN_SREG_MASK
9955 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9956 AssertRCReturn(rc, rc);
9957
9958 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9959
9960 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9961 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9962
9963 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9964 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9966 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9967 {
9968 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9969 rcStrict = VINF_SUCCESS;
9970 }
9971 return rcStrict;
9972}
9973# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9974#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9975/** @} */
9976
9977
9978#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9979/** @name Nested-guest VM-exit handlers.
9980 * @{
9981 */
9982/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9983/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9984/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9985
9986/**
9987 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9988 * Conditional VM-exit.
9989 */
9990HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9991{
9992 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9993
9994 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9995
9996 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9997 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9998 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9999
10000 switch (uExitIntType)
10001 {
10002# ifndef IN_NEM_DARWIN
10003 /*
10004 * Physical NMIs:
10005 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
10006 */
10007 case VMX_EXIT_INT_INFO_TYPE_NMI:
10008 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10009# endif
10010
10011 /*
10012 * Hardware exceptions,
10013 * Software exceptions,
10014 * Privileged software exceptions:
10015 * Figure out if the exception must be delivered to the guest or the nested-guest.
10016 */
10017 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10018 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10019 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10020 {
10021 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10022 | HMVMX_READ_EXIT_INSTR_LEN
10023 | HMVMX_READ_IDT_VECTORING_INFO
10024 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10025
10026 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10027 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10028 {
10029 /* Exit qualification is required for debug and page-fault exceptions. */
10030 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10031
10032 /*
10033 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10034 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10035 * length. However, if delivery of a software interrupt, software exception or privileged
10036 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10037 */
10038 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10039 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10040 pVmxTransient->uExitIntErrorCode,
10041 pVmxTransient->uIdtVectoringInfo,
10042 pVmxTransient->uIdtVectoringErrorCode);
10043#ifdef DEBUG_ramshankar
10044 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10045 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10046 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10047 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10048 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10049 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10050#endif
10051 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10052 }
10053
10054 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10055 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10056 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10057 }
10058
10059 /*
10060 * Software interrupts:
10061 * VM-exits cannot be caused by software interrupts.
10062 *
10063 * External interrupts:
10064 * This should only happen when "acknowledge external interrupts on VM-exit"
10065 * control is set. However, we never set this when executing a guest or
10066 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10067 * the guest.
10068 */
10069 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10070 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10071 default:
10072 {
10073 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10074 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10075 }
10076 }
10077}
10078
10079
10080/**
10081 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10082 * Unconditional VM-exit.
10083 */
10084HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10085{
10086 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10087 return IEMExecVmxVmexitTripleFault(pVCpu);
10088}
10089
10090
10091/**
10092 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10093 */
10094HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10095{
10096 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10097
10098 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10099 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10100 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10101}
10102
10103
10104/**
10105 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10106 */
10107HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10108{
10109 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10110
10111 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10112 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10113 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10114}
10115
10116
10117/**
10118 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10119 * Unconditional VM-exit.
10120 */
10121HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10122{
10123 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10124
10125 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10126 | HMVMX_READ_EXIT_INSTR_LEN
10127 | HMVMX_READ_IDT_VECTORING_INFO
10128 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10129
10130 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10131 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10132 pVmxTransient->uIdtVectoringErrorCode);
10133 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10134}
10135
10136
10137/**
10138 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10139 */
10140HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10141{
10142 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10143
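    /* Typical pattern for conditionally intercepted instructions: if the nested hypervisor has
       enabled the corresponding exiting control, reflect the VM-exit to it; otherwise handle the
       exit as if it came from an ordinary guest. */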
10144 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10145 {
10146 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10147 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10148 }
10149 return vmxHCExitHlt(pVCpu, pVmxTransient);
10150}
10151
10152
10153/**
10154 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10155 */
10156HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10157{
10158 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10159
10160 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10161 {
10162 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10163 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10164 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10165 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10166 }
10167 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10168}
10169
10170
10171/**
10172 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10173 */
10174HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10175{
10176 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10177
10178 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10179 {
10180 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10181 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10182 }
10183 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10184}
10185
10186
10187/**
10188 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10189 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10190 */
10191HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10192{
10193 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10194
10195 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10196 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10197
10198 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10199
10200 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10201 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10202 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10203
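    /* Outside long mode only bits 31:0 of the register are used as the VMCS field encoding,
       hence the check below needs EFER (to tell whether the guest is in long mode). */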
10204 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10205 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10206 u64VmcsField &= UINT64_C(0xffffffff);
10207
10208 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10209 {
10210 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10211 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10212 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10213 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10214 }
10215
10216 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10217 return vmxHCExitVmread(pVCpu, pVmxTransient);
10218 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10219}
10220
10221
10222/**
10223 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10224 */
10225HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10226{
10227 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10228
10229 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10230 {
10231 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10232 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10233 }
10234
10235 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10236}
10237
10238
10239/**
10240 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10241 * Conditional VM-exit.
10242 */
10243HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10244{
10245 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10246
10247 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10248 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10249
10250 VBOXSTRICTRC rcStrict;
10251 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10252 switch (uAccessType)
10253 {
10254 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10255 {
10256 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10257 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10258 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10259 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10260
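            /* Work out whether the nested hypervisor intercepts this MOV-to-CRx: CR0/CR4 based on
               the guest/host masks and read shadows, CR3 based on CR3-load exiting and the CR3-target
               list, and CR8 based on the CR8-load exiting control. Other CRs never intercept here. */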
10261 bool fIntercept;
10262 switch (iCrReg)
10263 {
10264 case 0:
10265 case 4:
10266 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10267 break;
10268
10269 case 3:
10270 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10271 break;
10272
10273 case 8:
10274 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10275 break;
10276
10277 default:
10278 fIntercept = false;
10279 break;
10280 }
10281 if (fIntercept)
10282 {
10283 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10284 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10285 }
10286 else
10287 {
10288 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10289 AssertRCReturn(rc, rc);
10290 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10291 }
10292 break;
10293 }
10294
10295 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10296 {
10297 /*
10298 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10299 * CR2 reads do not cause a VM-exit.
10300 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10301 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10302 */
10303 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10304 if ( iCrReg == 3
10305 || iCrReg == 8)
10306 {
10307 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10308 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10309 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10310 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10311 {
10312 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10313 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10314 }
10315 else
10316 {
10317 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10318 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10319 }
10320 }
10321 else
10322 {
10323 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10324 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10325 }
10326 break;
10327 }
10328
10329 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10330 {
10331 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10332 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10333 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
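            /* CLTS causes a VM-exit to the nested hypervisor only when CR0.TS is owned by it
               (set in the CR0 guest/host mask) and the CR0 read shadow also has TS set. */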
10334 if ( (uGstHostMask & X86_CR0_TS)
10335 && (uReadShadow & X86_CR0_TS))
10336 {
10337 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10338 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10339 }
10340 else
10341 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10342 break;
10343 }
10344
10345 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10346 {
10347 RTGCPTR GCPtrEffDst;
10348 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10349 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10350 if (fMemOperand)
10351 {
10352 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10353 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10354 }
10355 else
10356 GCPtrEffDst = NIL_RTGCPTR;
10357
10358 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10359 {
10360 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10361 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10362 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10363 }
10364 else
10365 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10366 break;
10367 }
10368
10369 default:
10370 {
10371 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10372 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10373 }
10374 }
10375
10376 if (rcStrict == VINF_IEM_RAISED_XCPT)
10377 {
10378 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10379 rcStrict = VINF_SUCCESS;
10380 }
10381 return rcStrict;
10382}
10383
10384
10385/**
10386 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10387 * Conditional VM-exit.
10388 */
10389HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10390{
10391 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10392
10393 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10394 {
10395 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10396 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10397 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10398 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10399 }
10400 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10401}
10402
10403
10404/**
10405 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10406 * Conditional VM-exit.
10407 */
10408HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10409{
10410 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10411
10412 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10413
10414 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10415 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10416 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10417
10418 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10419 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10420 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10421 {
10422 /*
10423 * IN/OUT instruction:
10424 * - Provides VM-exit instruction length.
10425 *
10426 * INS/OUTS instruction:
10427 * - Provides VM-exit instruction length.
10428 * - Provides Guest-linear address.
10429 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10430 */
10431 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10432 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10433
10434 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10435 pVmxTransient->ExitInstrInfo.u = 0;
10436 pVmxTransient->uGuestLinearAddr = 0;
10437
10438 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10439 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10440 if (fIOString)
10441 {
10442 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10443 if (fVmxInsOutsInfo)
10444 {
10445 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10446 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10447 }
10448 }
10449
10450 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10451 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10452 }
10453 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10454}
10455
10456
10457/**
10458 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10459 */
10460HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10461{
10462 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10463
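    /* If the nested hypervisor doesn't use MSR bitmaps, every RDMSR causes a VM-exit; otherwise
       consult the read-permission bit for the MSR in ECX. */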
10464 uint32_t fMsrpm;
10465 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10466 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10467 else
10468 fMsrpm = VMXMSRPM_EXIT_RD;
10469
10470 if (fMsrpm & VMXMSRPM_EXIT_RD)
10471 {
10472 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10473 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10474 }
10475 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10476}
10477
10478
10479/**
10480 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10481 */
10482HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10483{
10484 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10485
10486 uint32_t fMsrpm;
10487 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10488 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10489 else
10490 fMsrpm = VMXMSRPM_EXIT_WR;
10491
10492 if (fMsrpm & VMXMSRPM_EXIT_WR)
10493 {
10494 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10495 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10496 }
10497 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10498}
10499
10500
10501/**
10502 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10503 */
10504HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10505{
10506 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10507
10508 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10509 {
10510 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10511 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10512 }
10513 return vmxHCExitMwait(pVCpu, pVmxTransient);
10514}
10515
10516
10517/**
10518 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10519 * VM-exit.
10520 */
10521HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10522{
10523 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10524
10525 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
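    /* Monitor-trap-flag VM-exits are trap-like: the instruction has already completed, so any
       pending debug exceptions are passed along to the nested hypervisor. */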
10526 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10527 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10528 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10529}
10530
10531
10532/**
10533 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10534 */
10535HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10536{
10537 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10538
10539 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10540 {
10541 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10542 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10543 }
10544 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10545}
10546
10547
10548/**
10549 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10550 */
10551HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10552{
10553 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10554
10555 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10556 * PAUSE when executing a nested-guest? If it does not, we would not need
10557 * to check for the intercepts here. Just call VM-exit... */
10558
10559 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10560 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10561 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10562 {
10563 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10564 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10565 }
10566 return vmxHCExitPause(pVCpu, pVmxTransient);
10567}
10568
10569
10570/**
10571 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10572 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10573 */
10574HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10575{
10576 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10577
10578 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10579 {
10580 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10581 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10582 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10583 }
10584 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10585}
10586
10587
10588/**
10589 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10590 * VM-exit.
10591 */
10592HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10593{
10594 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10595
10596 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10597 | HMVMX_READ_EXIT_INSTR_LEN
10598 | HMVMX_READ_IDT_VECTORING_INFO
10599 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10600
10601 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10602
10603 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10604 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10605
10606 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10607 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10608 pVmxTransient->uIdtVectoringErrorCode);
10609 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10610}
10611
10612
10613/**
10614 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10615 * Conditional VM-exit.
10616 */
10617HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10618{
10619 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10620
10621 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10622 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10623 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10624}
10625
10626
10627/**
10628 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10629 * Conditional VM-exit.
10630 */
10631HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10632{
10633 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10634
10635 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10636 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10637 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10638}
10639
10640
10641/**
10642 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10643 */
10644HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10645{
10646 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10647
10648 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10649 {
10650 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10651 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10652 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10653 }
10654 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10655}
10656
10657
10658/**
10659 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10660 */
10661HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10662{
10663 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10664
10665 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10666 {
10667 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10668 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10669 }
10670 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10671}
10672
10673
10674/**
10675 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10676 */
10677HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10678{
10679 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10680
10681 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10682 {
10683 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10684 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10685 | HMVMX_READ_EXIT_INSTR_INFO
10686 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10687 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10688 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10689 }
10690 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10691}
10692
10693
10694/**
10695 * Nested-guest VM-exit handler for invalid-guest state
10696 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10697 */
10698HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10699{
10700 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10701
10702 /*
10703 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10704 * So if it does happen, it indicates a bug possibly in the hardware-assisted VMX code.
10705 * Handle it as if the outer guest itself were in an invalid guest state.
10706 *
10707 * When the fast path is implemented, this should be changed to cause the corresponding
10708 * nested-guest VM-exit.
10709 */
10710 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10711}
10712
10713
10714/**
10715 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10716 * and only provide the instruction length.
10717 *
10718 * Unconditional VM-exit.
10719 */
10720HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10721{
10722 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10723
10724#ifdef VBOX_STRICT
10725 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10726 switch (pVmxTransient->uExitReason)
10727 {
10728 case VMX_EXIT_ENCLS:
10729 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10730 break;
10731
10732 case VMX_EXIT_VMFUNC:
10733 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10734 break;
10735 }
10736#endif
10737
10738 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10739 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10740}
10741
10742
10743/**
10744 * Nested-guest VM-exit handler for instructions that provide instruction length as
10745 * well as more information.
10746 *
10747 * Unconditional VM-exit.
10748 */
10749HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10750{
10751 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10752
10753# ifdef VBOX_STRICT
10754 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10755 switch (pVmxTransient->uExitReason)
10756 {
10757 case VMX_EXIT_GDTR_IDTR_ACCESS:
10758 case VMX_EXIT_LDTR_TR_ACCESS:
10759 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10760 break;
10761
10762 case VMX_EXIT_RDRAND:
10763 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10764 break;
10765
10766 case VMX_EXIT_RDSEED:
10767 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10768 break;
10769
10770 case VMX_EXIT_XSAVES:
10771 case VMX_EXIT_XRSTORS:
10772 /** @todo NSTVMX: Verify XSS-bitmap. */
10773 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10774 break;
10775
10776 case VMX_EXIT_UMWAIT:
10777 case VMX_EXIT_TPAUSE:
10778 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10779 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10780 break;
10781
10782 case VMX_EXIT_LOADIWKEY:
10783 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10784 break;
10785 }
10786# endif
10787
10788 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10789 | HMVMX_READ_EXIT_INSTR_LEN
10790 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10791 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10792 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10793}
10794
10795# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10796
10797/**
10798 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10799 * Conditional VM-exit.
10800 */
10801HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10802{
10803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10804 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10805
10806 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10807 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10808 {
10809 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10810 | HMVMX_READ_EXIT_INSTR_LEN
10811 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10812 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10813 | HMVMX_READ_IDT_VECTORING_INFO
10814 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10815 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10816 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10817 AssertRCReturn(rc, rc);
10818
10819 /*
10820 * If the VM-exit is ours to handle, we are responsible for re-injecting any event whose
10821 * delivery might have triggered it. If we forward the problem to the nested hypervisor (the
10822 * inner VMM), dealing with the event is its responsibility and we clear the recovered event.
10823 */
10824 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10825 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10826 { /*likely*/ }
10827 else
10828 {
10829 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10830 return rcStrict;
10831 }
10832 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10833
10834 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10835 uint64_t const uExitQual = pVmxTransient->uExitQual;
10836
10837 RTGCPTR GCPtrNestedFault;
10838 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10839 if (fIsLinearAddrValid)
10840 {
10841 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10842 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10843 }
10844 else
10845 GCPtrNestedFault = 0;
10846
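        /* Translate the EPT-violation exit qualification into a #PF-style error code for PGM:
           instruction fetch -> ID, write access -> RW, and any read/write/execute permission bit
           present in the violating EPT entry -> P. */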
10847 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10848 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10849 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10850 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10851 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10852
10853 PGMPTWALK Walk;
10854 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10855 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10856 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10857 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10858 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10859 if (RT_SUCCESS(rcStrict))
10860 return rcStrict;
10861
10862 if (fClearEventOnForward)
10863 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10864
10865 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10866 pVmxTransient->uIdtVectoringErrorCode);
10867 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10868 {
10869 VMXVEXITINFO const ExitInfo
10870 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10871 pVmxTransient->uExitQual,
10872 pVmxTransient->cbExitInstr,
10873 pVmxTransient->uGuestLinearAddr,
10874 pVmxTransient->uGuestPhysicalAddr);
10875 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10876 }
10877
10878 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10879 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10880 }
10881
10882 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10883}
10884
10885
10886/**
10887 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10888 * Conditional VM-exit.
10889 */
10890HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10891{
10892 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10893 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10894
10895 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10896 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10897 {
10898 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10899 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10900 AssertRCReturn(rc, rc);
10901
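        /* Ask PGM's nested-paging handler to resolve the fault; X86_TRAP_PF_RSVD signals that we
           got here because of an EPT misconfiguration. */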
10902 PGMPTWALK Walk;
10903 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10904 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10905 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10906 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10907 0 /* GCPtrNestedFault */, &Walk);
10908 if (RT_SUCCESS(rcStrict))
10909 {
10910 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10911 return rcStrict;
10912 }
10913
10914 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10915 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10916 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10917
10918 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10919 pVmxTransient->uIdtVectoringErrorCode);
10920 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10921 }
10922
10923 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10924}
10925
10926# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10927
10928/** @} */
10929#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10930
10931
10932/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10933 * probes.
10934 *
10935 * The following few functions and associated structure contain the bloat
10936 * necessary for providing detailed debug events and dtrace probes as well as
10937 * reliable host-side single stepping. This works on the principle of
10938 * "subclassing" the normal execution loop and workers. We replace the loop
10939 * method completely and override selected helpers to add necessary adjustments
10940 * to their core operation.
10941 *
10942 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10943 * any performance for debug and analysis features.
10944 *
10945 * @{
10946 */
10947
10948/**
10949 * Transient per-VCPU debug state of VMCS and related info that we save/restore in
10950 * the debug run loop.
10951 */
10952typedef struct VMXRUNDBGSTATE
10953{
10954 /** The RIP we started executing at. This is for detecting that we stepped. */
10955 uint64_t uRipStart;
10956 /** The CS we started executing with. */
10957 uint16_t uCsStart;
10958
10959 /** Whether we've actually modified the 1st execution control field. */
10960 bool fModifiedProcCtls : 1;
10961 /** Whether we've actually modified the 2nd execution control field. */
10962 bool fModifiedProcCtls2 : 1;
10963 /** Whether we've actually modified the exception bitmap. */
10964 bool fModifiedXcptBitmap : 1;
10965
10966 /** We desire the CR0 mask to be cleared. */
10967 bool fClearCr0Mask : 1;
10968 /** We desire the CR4 mask to be cleared. */
10969 bool fClearCr4Mask : 1;
10970 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10971 uint32_t fCpe1Extra;
10972 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10973 uint32_t fCpe1Unwanted;
10974 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10975 uint32_t fCpe2Extra;
10976 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10977 uint32_t bmXcptExtra;
10978 /** The sequence number of the Dtrace provider settings the state was
10979 * configured against. */
10980 uint32_t uDtraceSettingsSeqNo;
10981 /** VM-exits to check (one bit per VM-exit). */
10982 uint32_t bmExitsToCheck[3];
10983
10984 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10985 uint32_t fProcCtlsInitial;
10986 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10987 uint32_t fProcCtls2Initial;
10988 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10989 uint32_t bmXcptInitial;
10990} VMXRUNDBGSTATE;
10991AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10992typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10993
10994
10995/**
10996 * Initializes the VMXRUNDBGSTATE structure.
10997 *
10998 * @param pVCpu The cross context virtual CPU structure of the
10999 * calling EMT.
11000 * @param pVmxTransient The VMX-transient structure.
11001 * @param pDbgState The debug state to initialize.
11002 */
11003static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11004{
11005 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11006 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11007
11008 pDbgState->fModifiedProcCtls = false;
11009 pDbgState->fModifiedProcCtls2 = false;
11010 pDbgState->fModifiedXcptBitmap = false;
11011 pDbgState->fClearCr0Mask = false;
11012 pDbgState->fClearCr4Mask = false;
11013 pDbgState->fCpe1Extra = 0;
11014 pDbgState->fCpe1Unwanted = 0;
11015 pDbgState->fCpe2Extra = 0;
11016 pDbgState->bmXcptExtra = 0;
11017 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11018 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11019 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11020}
11021
11022
11023/**
11024 * Updates the VMCS fields with changes requested by @a pDbgState.
11025 *
11026 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11027 * immediately before executing guest code, i.e. when interrupts are disabled.
11028 * We don't check status codes here as we cannot easily assert or return in the
11029 * latter case.
11030 *
11031 * @param pVCpu The cross context virtual CPU structure.
11032 * @param pVmxTransient The VMX-transient structure.
11033 * @param pDbgState The debug state.
11034 */
11035static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11036{
11037 /*
11038 * Ensure desired flags in VMCS control fields are set.
11039 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11040 *
11041 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11042 * there should be no stale data in pCtx at this point.
11043 */
11044 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11045 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11046 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11047 {
11048 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11049 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11050 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11051 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11052 pDbgState->fModifiedProcCtls = true;
11053 }
11054
11055 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11056 {
11057 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11058 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11059 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11060 pDbgState->fModifiedProcCtls2 = true;
11061 }
11062
11063 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11064 {
11065 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11066 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11067 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11068 pDbgState->fModifiedXcptBitmap = true;
11069 }
11070
11071 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11072 {
11073 pVmcsInfo->u64Cr0Mask = 0;
11074 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11075 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11076 }
11077
11078 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11079 {
11080 pVmcsInfo->u64Cr4Mask = 0;
11081 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11082 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11083 }
11084
11085 NOREF(pVCpu);
11086}
11087
11088
11089/**
11090 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11091 * re-entry next time around.
11092 *
11093 * @returns Strict VBox status code (i.e. informational status codes too).
11094 * @param pVCpu The cross context virtual CPU structure.
11095 * @param pVmxTransient The VMX-transient structure.
11096 * @param pDbgState The debug state.
11097 * @param rcStrict The return code from executing the guest using single
11098 * stepping.
11099 */
11100static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11101 VBOXSTRICTRC rcStrict)
11102{
11103 /*
11104 * Restore VM-exit control settings as we may not reenter this function the
11105 * next time around.
11106 */
11107 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11108
11109 /* We reload the initial value, trigger what we can of recalculations the
11110 next time around. From the looks of things, that's all that's required atm. */
11111 if (pDbgState->fModifiedProcCtls)
11112 {
11113 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11114 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11115 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11116 AssertRC(rc2);
11117 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11118 }
11119
11120 /* We're currently the only ones messing with this one, so just restore the
11121 cached value and reload the field. */
11122 if ( pDbgState->fModifiedProcCtls2
11123 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11124 {
11125 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11126 AssertRC(rc2);
11127 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11128 }
11129
11130 /* If we've modified the exception bitmap, we restore it and trigger
11131 reloading and partial recalculation the next time around. */
11132 if (pDbgState->fModifiedXcptBitmap)
11133 {
11134 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11135 AssertRC(rc2);
11136 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11137 }
11138
11139 return rcStrict;
11140}
11141
11142
11143/**
11144 * Configures VM-exit controls for current DBGF and DTrace settings.
11145 *
11146 * This updates @a pDbgState and the VMCS execution control fields to reflect
11147 * the necessary VM-exits demanded by DBGF and DTrace.
11148 *
11149 * @param pVCpu The cross context virtual CPU structure.
11150 * @param pVmxTransient The VMX-transient structure. May update
11151 * fUpdatedTscOffsettingAndPreemptTimer.
11152 * @param pDbgState The debug state.
11153 */
11154static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11155{
11156#ifndef IN_NEM_DARWIN
11157 /*
11158 * Record the DTrace settings sequence number so we can spot changes.
11159 */
11160 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11161 ASMCompilerBarrier();
11162#endif
11163
11164 /*
11165 * We'll rebuild most of the middle block of data members (holding the
11166 * current settings) as we go along here, so start by clearing it all.
11167 */
11168 pDbgState->bmXcptExtra = 0;
11169 pDbgState->fCpe1Extra = 0;
11170 pDbgState->fCpe1Unwanted = 0;
11171 pDbgState->fCpe2Extra = 0;
11172 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11173 pDbgState->bmExitsToCheck[i] = 0;
11174
11175 /*
11176 * Software interrupts (INT XXh) - no idea how to trigger these...
11177 */
11178 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11179 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11180 || VBOXVMM_INT_SOFTWARE_ENABLED())
11181 {
11182 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11183 }
11184
11185 /*
11186 * INT3 breakpoints - triggered by #BP exceptions.
11187 */
11188 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11189 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11190
11191 /*
11192 * Exception bitmap and XCPT events+probes.
11193 */
11194 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11195 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11196 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11197
11198 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11199 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11200 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11201 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11202 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11203 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11204 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11205 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11206 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11207 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11208 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11209 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11210 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11211 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11212 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11213 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11214 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11215 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11216
11217 if (pDbgState->bmXcptExtra)
11218 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11219
11220 /*
11221 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11222 *
11223 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11224 * So, when adding/changing/removing please don't forget to update it.
11225 *
11226 * Some of the macros are picking up local variables to save horizontal space,
11227 * (being able to see it in a table is the lesser evil here).
11228 */
11229#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11230 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11231 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11232#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11233 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11234 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11235 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11236 } else do { } while (0)
11237#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11238 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11239 { \
11240 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11241 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11242 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11243 } else do { } while (0)
11244#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11245 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11246 { \
11247 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11248 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11249 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11250 } else do { } while (0)
11251#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11252 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11253 { \
11254 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11255 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11256 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11257 } else do { } while (0)
11258
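    /* Illustrative only (not compiled): a line such as
     *     SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID);
     * expands, roughly, to
     *     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_CPUID)
     *         || VBOXVMM_INSTR_CPUID_ENABLED())
     *     {
     *         AssertCompile((unsigned)VMX_EXIT_CPUID < sizeof(pDbgState->bmExitsToCheck) * 8);
     *         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_CPUID);
     *     }
     * i.e. the exit is only flagged for inspection when either the DBGF event or
     * the corresponding dtrace probe is currently enabled. */
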
11259 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11260 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11261 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11262 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11263 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11264
11265 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11266 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11267 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11268 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11269 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11270 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11271 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11273 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11274 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11275 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11276 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11277 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11278 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11279 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11281 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11282 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11283 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11284 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11285 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11286 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11287 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11289 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11291 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11292 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11293 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11295 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11297 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11298 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11299 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11301
11302 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11303 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11304 {
11305 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11306 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11307 AssertRC(rc);
11308
11309#if 0 /** @todo fix me */
11310 pDbgState->fClearCr0Mask = true;
11311 pDbgState->fClearCr4Mask = true;
11312#endif
11313 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11314 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11315 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11316 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11317 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11318 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11319 require clearing here and in the loop if we start using it. */
11320 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11321 }
11322 else
11323 {
11324 if (pDbgState->fClearCr0Mask)
11325 {
11326 pDbgState->fClearCr0Mask = false;
11327 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11328 }
11329 if (pDbgState->fClearCr4Mask)
11330 {
11331 pDbgState->fClearCr4Mask = false;
11332 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11333 }
11334 }
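    /* (Setting HM_CHANGED_GUEST_CR0/CR4 above is meant to get the normal CR0/CR4
     *  setup, including the guest/host masks, re-exported on the next VM-entry,
     *  now that the debug state no longer needs the masks cleared.) */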
11335 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11336 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11337
11338 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11339 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11340 {
11341 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11342 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11343 }
11344 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11345 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11346
11347 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11348 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11349 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11350 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11351 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11352 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11353 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11354 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11355#if 0 /** @todo too slow, fix handler. */
11356 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11357#endif
11358 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11359
11360 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11361 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11362 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11363 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11364 {
11365 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11366 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11367 }
11368 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11369 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11370 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11371 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11372
11373 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11374 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11375 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11376 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11377 {
11378 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11379 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11380 }
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11382 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11383 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11384 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11385
11386 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11387 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11388 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11389 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11390 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11391 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11392 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11393 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11394 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11395 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11396 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11397 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11398 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11399 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11400 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11401 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11402 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11403 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11404 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11405 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11406 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11407 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11408
11409#undef IS_EITHER_ENABLED
11410#undef SET_ONLY_XBM_IF_EITHER_EN
11411#undef SET_CPE1_XBM_IF_EITHER_EN
11412#undef SET_CPEU_XBM_IF_EITHER_EN
11413#undef SET_CPE2_XBM_IF_EITHER_EN
11414
11415 /*
11416 * Sanitize the extra/unwanted execution controls against what the CPU allows.
11417 */
11418 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11419 if (pDbgState->fCpe2Extra)
11420 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11421 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11422 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
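    /* Example: if a probe asked for VMX_PROC_CTLS2_DESC_TABLE_EXIT but the CPU's
     * allowed-1 mask doesn't permit it, the bit is dropped here and those exits
     * simply won't occur; likewise, a control the CPU forces to 1 (allowed-0)
     * cannot be listed as unwanted. */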
11423#ifndef IN_NEM_DARWIN
11424 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11425 {
11426 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11427 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11428 }
11429#else
11430 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11431 {
11432 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11433 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11434 }
11435#endif
11436
11437 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11438 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11439 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11440 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11441}
11442
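/*
 * Illustrative sketch only (not part of this file): the bits computed above are
 * meant to be folded into the VMCS execution controls before re-entering the
 * guest, conceptually along these lines (assuming pVmcsInfo->u32ProcCtls and
 * u32ProcCtls2 mirror the currently loaded primary/secondary controls):
 *
 *     uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
 *     uProcCtls |= pDbgState->fCpe1Extra;        // force the exits we want to see
 *     uProcCtls &= ~pDbgState->fCpe1Unwanted;    // drop features that get in the way
 *     VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
 *
 *     VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2,
 *                       pVmcsInfo->u32ProcCtls2 | pDbgState->fCpe2Extra);
 *
 * The actual apply/restore code (elsewhere in this template) also keeps track of
 * what must be undone when the debug loop is left.
 */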
11443
11444/**
11445 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11446 * appropriate.
11447 *
11448 * The caller has checked the VM-exit against the
11449 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
11450 * so we don't have to do either of those here.
11451 *
11452 * @returns Strict VBox status code (i.e. informational status codes too).
11453 * @param pVCpu The cross context virtual CPU structure.
11454 * @param pVmxTransient The VMX-transient structure.
11455 * @param uExitReason The VM-exit reason.
11456 *
11457 * @remarks The name of this function is displayed by dtrace, so keep it short
11458 * and to the point. No longer than 33 chars, please.
11459 */
11460static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11461{
11462 /*
11463 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11464 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11465 *
11466 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11467 * does. Must add/change/remove both places. Same ordering, please.
11468 *
11469 * Added/removed events must also be reflected in the next section
11470 * where we dispatch dtrace events.
11471 */
11472 bool fDtrace1 = false;
11473 bool fDtrace2 = false;
11474 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11475 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11476 uint32_t uEventArg = 0;
11477#define SET_EXIT(a_EventSubName) \
11478 do { \
11479 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11480 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11481 } while (0)
11482#define SET_BOTH(a_EventSubName) \
11483 do { \
11484 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11485 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11486 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11487 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11488 } while (0)
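    /* Illustrative only: "case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;" below is
     * shorthand, roughly, for
     *     enmEvent1 = DBGFEVENT_INSTR_CPUID;
     *     enmEvent2 = DBGFEVENT_EXIT_CPUID;
     *     fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
     *     fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
     * while SET_EXIT() only fills in the exit-side pair. */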
11489 switch (uExitReason)
11490 {
11491 case VMX_EXIT_MTF:
11492 return vmxHCExitMtf(pVCpu, pVmxTransient);
11493
11494 case VMX_EXIT_XCPT_OR_NMI:
11495 {
11496 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11497 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11498 {
11499 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11500 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11501 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11502 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11503 {
11504 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11505 {
11506 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11507 uEventArg = pVmxTransient->uExitIntErrorCode;
11508 }
11509 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11510 switch (enmEvent1)
11511 {
11512 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11513 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11514 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11515 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11516 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11517 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11518 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11519 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11520 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11521 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11522 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11523 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11524 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11525 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11526 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11527 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11528 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11529 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11530 default: break;
11531 }
11532 }
11533 else
11534 AssertFailed();
11535 break;
11536
11537 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11538 uEventArg = idxVector;
11539 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11540 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11541 break;
11542 }
11543 break;
11544 }
11545
11546 case VMX_EXIT_TRIPLE_FAULT:
11547 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11548 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11549 break;
11550 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11551 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11552 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11553 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11554 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11555
11556 /* Instruction specific VM-exits: */
11557 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11558 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11559 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11560 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11561 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11562 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11563 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11564 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11565 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11566 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11567 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11568 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11569 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11570 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11571 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11572 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11573 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11574 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11575 case VMX_EXIT_MOV_CRX:
11576 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11577 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11578 SET_BOTH(CRX_READ);
11579 else
11580 SET_BOTH(CRX_WRITE);
11581 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11582 break;
11583 case VMX_EXIT_MOV_DRX:
11584 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11585 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11586 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11587 SET_BOTH(DRX_READ);
11588 else
11589 SET_BOTH(DRX_WRITE);
11590 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11591 break;
11592 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11593 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11594 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11595 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11596 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11597 case VMX_EXIT_GDTR_IDTR_ACCESS:
11598 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11599 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11600 {
11601 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11602 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11603 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11604 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11605 }
11606 break;
11607
11608 case VMX_EXIT_LDTR_TR_ACCESS:
11609 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11610 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11611 {
11612 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11613 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11614 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11615 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11616 }
11617 break;
11618
11619 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11620 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11621 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11622 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11623 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11624 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11625 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11626 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11627 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11628 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11629 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11630
11631 /* Events that aren't relevant at this point. */
11632 case VMX_EXIT_EXT_INT:
11633 case VMX_EXIT_INT_WINDOW:
11634 case VMX_EXIT_NMI_WINDOW:
11635 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11636 case VMX_EXIT_PREEMPT_TIMER:
11637 case VMX_EXIT_IO_INSTR:
11638 break;
11639
11640 /* Errors and unexpected events. */
11641 case VMX_EXIT_INIT_SIGNAL:
11642 case VMX_EXIT_SIPI:
11643 case VMX_EXIT_IO_SMI:
11644 case VMX_EXIT_SMI:
11645 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11646 case VMX_EXIT_ERR_MSR_LOAD:
11647 case VMX_EXIT_ERR_MACHINE_CHECK:
11648 case VMX_EXIT_PML_FULL:
11649 case VMX_EXIT_VIRTUALIZED_EOI:
11650 break;
11651
11652 default:
11653 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11654 break;
11655 }
11656#undef SET_BOTH
11657#undef SET_EXIT
11658
11659 /*
11660 * Dtrace tracepoints go first. We do them all here at once so we don't
11661 * have to repeat the guest-state saving and related boilerplate a few dozen times.
11662 * The downside is that we've got to repeat the switch, though this time
11663 * we use enmEvent since the probes are a subset of what DBGF does.
11664 */
11665 if (fDtrace1 || fDtrace2)
11666 {
11667 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11668 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11669 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11670 switch (enmEvent1)
11671 {
11672 /** @todo consider which extra parameters would be helpful for each probe. */
11673 case DBGFEVENT_END: break;
11674 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11675 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11676 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11677 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11678 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11679 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11680 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11681 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11682 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11683 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11684 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11685 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11686 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11687 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11688 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11689 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11690 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11691 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11692 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11693 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11694 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11695 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11696 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11697 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11698 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11699 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11700 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11701 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11702 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11703 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11704 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11705 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11706 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11707 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11708 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11709 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11710 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11711 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11712 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11713 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11714 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11715 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11716 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11717 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11718 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11719 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11720 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11721 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11722 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11723 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11724 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11725 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11726 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11727 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11728 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11729 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11730 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11731 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11732 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11733 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11734 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11735 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11736 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11737 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11738 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11739 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11740 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11741 }
11742 switch (enmEvent2)
11743 {
11744 /** @todo consider which extra parameters would be helpful for each probe. */
11745 case DBGFEVENT_END: break;
11746 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11747 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11748 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11749 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11750 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11751 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11752 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11753 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11754 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11755 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11756 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11757 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11758 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11759 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11760 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11761 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11762 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11763 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11764 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11765 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11766 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11767 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11768 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11769 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11770 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11771 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11772 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11773 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11774 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11775 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11776 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11777 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11778 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11779 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11780 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11781 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11782 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11783 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11784 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11785 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11786 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11787 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11788 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11789 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11790 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11791 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11792 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11793 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11794 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11795 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11796 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11797 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11798 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11799 }
11800 }
11801
11802 /*
11803 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11804 * the DBGF call will do a full check).
11805 *
11806 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11807 * Note! If we have two events, we prioritize the first, i.e. the instruction
11808 * one, in order to avoid event nesting.
11809 */
11810 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11811 if ( enmEvent1 != DBGFEVENT_END
11812 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11813 {
11814 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11815 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11816 if (rcStrict != VINF_SUCCESS)
11817 return rcStrict;
11818 }
11819 else if ( enmEvent2 != DBGFEVENT_END
11820 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11821 {
11822 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11823 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11824 if (rcStrict != VINF_SUCCESS)
11825 return rcStrict;
11826 }
11827
11828 return VINF_SUCCESS;
11829}
11830
11831
11832/**
11833 * Single-stepping VM-exit filtering.
11834 *
11835 * This is preprocessing the VM-exits and deciding whether we've gotten far
11836 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11837 * handling is performed.
11838 *
11839 * @returns Strict VBox status code (i.e. informational status codes too).
11840 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11841 * @param pVmxTransient The VMX-transient structure.
11842 * @param pDbgState The debug state.
11843 */
11844DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11845{
11846 /*
11847 * Expensive (saves context) generic dtrace VM-exit probe.
11848 */
11849 uint32_t const uExitReason = pVmxTransient->uExitReason;
11850 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11851 { /* more likely */ }
11852 else
11853 {
11854 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11855 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11856 AssertRC(rc);
11857 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11858 }
11859
11860#ifndef IN_NEM_DARWIN
11861 /*
11862 * Check for host NMI, just to get that out of the way.
11863 */
11864 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11865 { /* normally likely */ }
11866 else
11867 {
11868 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11869 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11870 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11871 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11872 }
11873#endif
11874
11875 /*
11876 * Check for single stepping event if we're stepping.
11877 */
11878 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11879 {
11880 switch (uExitReason)
11881 {
11882 case VMX_EXIT_MTF:
11883 return vmxHCExitMtf(pVCpu, pVmxTransient);
11884
11885 /* Various events: */
11886 case VMX_EXIT_XCPT_OR_NMI:
11887 case VMX_EXIT_EXT_INT:
11888 case VMX_EXIT_TRIPLE_FAULT:
11889 case VMX_EXIT_INT_WINDOW:
11890 case VMX_EXIT_NMI_WINDOW:
11891 case VMX_EXIT_TASK_SWITCH:
11892 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11893 case VMX_EXIT_APIC_ACCESS:
11894 case VMX_EXIT_EPT_VIOLATION:
11895 case VMX_EXIT_EPT_MISCONFIG:
11896 case VMX_EXIT_PREEMPT_TIMER:
11897
11898 /* Instruction specific VM-exits: */
11899 case VMX_EXIT_CPUID:
11900 case VMX_EXIT_GETSEC:
11901 case VMX_EXIT_HLT:
11902 case VMX_EXIT_INVD:
11903 case VMX_EXIT_INVLPG:
11904 case VMX_EXIT_RDPMC:
11905 case VMX_EXIT_RDTSC:
11906 case VMX_EXIT_RSM:
11907 case VMX_EXIT_VMCALL:
11908 case VMX_EXIT_VMCLEAR:
11909 case VMX_EXIT_VMLAUNCH:
11910 case VMX_EXIT_VMPTRLD:
11911 case VMX_EXIT_VMPTRST:
11912 case VMX_EXIT_VMREAD:
11913 case VMX_EXIT_VMRESUME:
11914 case VMX_EXIT_VMWRITE:
11915 case VMX_EXIT_VMXOFF:
11916 case VMX_EXIT_VMXON:
11917 case VMX_EXIT_MOV_CRX:
11918 case VMX_EXIT_MOV_DRX:
11919 case VMX_EXIT_IO_INSTR:
11920 case VMX_EXIT_RDMSR:
11921 case VMX_EXIT_WRMSR:
11922 case VMX_EXIT_MWAIT:
11923 case VMX_EXIT_MONITOR:
11924 case VMX_EXIT_PAUSE:
11925 case VMX_EXIT_GDTR_IDTR_ACCESS:
11926 case VMX_EXIT_LDTR_TR_ACCESS:
11927 case VMX_EXIT_INVEPT:
11928 case VMX_EXIT_RDTSCP:
11929 case VMX_EXIT_INVVPID:
11930 case VMX_EXIT_WBINVD:
11931 case VMX_EXIT_XSETBV:
11932 case VMX_EXIT_RDRAND:
11933 case VMX_EXIT_INVPCID:
11934 case VMX_EXIT_VMFUNC:
11935 case VMX_EXIT_RDSEED:
11936 case VMX_EXIT_XSAVES:
11937 case VMX_EXIT_XRSTORS:
11938 {
11939 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11940 AssertRCReturn(rc, rc);
11941 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11942 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11943 return VINF_EM_DBG_STEPPED;
11944 break;
11945 }
11946
11947 /* Errors and unexpected events: */
11948 case VMX_EXIT_INIT_SIGNAL:
11949 case VMX_EXIT_SIPI:
11950 case VMX_EXIT_IO_SMI:
11951 case VMX_EXIT_SMI:
11952 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11953 case VMX_EXIT_ERR_MSR_LOAD:
11954 case VMX_EXIT_ERR_MACHINE_CHECK:
11955 case VMX_EXIT_PML_FULL:
11956 case VMX_EXIT_VIRTUALIZED_EOI:
11957 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11958 break;
11959
11960 default:
11961 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11962 break;
11963 }
11964 }
11965
11966 /*
11967 * Check for debugger event breakpoints and dtrace probes.
11968 */
11969 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11970 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11971 {
11972 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11973 if (rcStrict != VINF_SUCCESS)
11974 return rcStrict;
11975 }
11976
11977 /*
11978 * Normal processing.
11979 */
11980#ifdef HMVMX_USE_FUNCTION_TABLE
11981 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11982#else
11983 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11984#endif
11985}
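
/*
 * Usage sketch (hypothetical caller, not part of this file): the debug/single-
 * stepping run loop is expected to call the filter above once per VM-exit,
 * roughly like this (VmxTransient and DbgState being the loop's locals):
 *
 *     VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
 *     if (rcStrict != VINF_SUCCESS)
 *         break;    // VINF_EM_DBG_STEPPED, DBGF events, errors, ...
 *
 * Anything other than VINF_SUCCESS stops the inner loop and is propagated to
 * the caller.
 */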
11986
11987/** @} */