VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 98464

Last change on this file since 98464 was 98464, checked in by vboxsync, 2 years ago

VMM/VMXAllTemplate.cpp.h: Nested VMX: bugref:10318 Need this for debugging.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 526.5 KB
1/* $Id: VMXAllTemplate.cpp.h 98464 2023-02-03 12:26:19Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
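/* Editorial example (illustrative only, not part of the original file): an exit handler
   that earlier called vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>() can guard its
   use of the field in strict builds with
       HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
   before dereferencing pVmxTransient->uExitQual. */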
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
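/* Editorial note: CPUMCTX_EXTRN_XXX bits that are still set in pVCpu->cpum.GstCtx.fExtrn
   mark state that has NOT yet been imported from the VMCS. Code that requires the whole
   subset above therefore asserts it, e.g. (as done in vmxHCSwitchToGstOrNstGstVmcs() below):
       HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
*/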
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
95 * deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
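/* Editorial note (assumed usage, not shown in this excerpt): when running a real-mode
   guest without unrestricted guest execution, this mask is presumably OR'ed into the
   exception bitmap (VMX_VMCS32_CTRL_EXCEPTION_BITMAP) so the intercepted exceptions can
   be delivered with real-mode semantics; #DB and #AC stay intercepted unconditionally
   and #PF is left out when nested paging handles it. */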
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
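/* Editorial sketch (assumed dispatcher shape; names taken from the wider VBox tree for
   illustration only, not from this excerpt): the non-nested exit path presumably indexes
   the table above by the basic exit reason, along the lines of:
       uint32_t const uExitReason = VMX_EXIT_REASON_BASIC(pVmxTransient->uExitReason);
       AssertReturn(uExitReason <= VMX_EXIT_MAX, VERR_VMX_UNEXPECTED_EXIT);
       return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
*/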
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
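/* Editorial sketch (illustrative only, not code from this file): these strings are
   diagnostic aids, presumably indexed by the VM-instruction error read from the VMCS and
   clamped to HMVMX_INSTR_ERROR_MAX, e.g.:
       uint32_t uInstrError = 0;
       int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
       AssertRC(rc);
       Log(("VM-instruction error %u: %s\n", uInstrError,
            uInstrError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uInstrError] : "unknown"));
*/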
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Guest modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW),
726 * and to CR0 bits that we require for shadow paging (PG), must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
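/* Editorial note (assumed usage): the mask returned above is presumably written to
   VMX_VMCS_CTRL_CR0_MASK, with the guest-visible values of the owned bits kept in
   VMX_VMCS_CTRL_CR0_READ_SHADOW, so a guest write that flips any owned bit triggers a
   MOV-CRx VM-exit handled by vmxHCExitMovCRx(). */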
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because the behavior
769 * depends on CR3 which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
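/* Editorial note: unlike the CR0 helper, this routine builds the set of CR4 bits the
   guest may own (fGstMask) and returns its complement, so every bit not listed above
   (e.g. PSE, PAE, VMXE, SMEP, SMAP, PCIDE) stays host-owned and guest writes to it cause
   a MOV-CRx VM-exit. */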
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
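/* Editorial example (hypothetical call site, illustration only): to start trapping
   general-protection faults one would call
       vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
   which sets RT_BIT_32(X86_XCPT_GP) in the cached bitmap and commits it with a single
   VMWRITE only if the bit was not already set. */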
839
840
841/**
842 * Remove one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
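/* Editorial note: per the Intel SDM, VMCLEAR both flushes CPU-internal VMCS data back to
   the VMCS region and sets the launch state to "clear", which is why the sequence above
   is clear-old, clear-new (if needed), then load-new; hmR0VmxLoadVmcs() presumably wraps
   VMPTRLD much like vmxHCLoadShadowVmcs() wraps VMXLoadVmcs() above. */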
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call this directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequence without any conditionals in
1234 * non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
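/* Editorial example (illustrative only): an exit handler needing the exit qualification
   and instruction length would call
       vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
   which, in the common nothing-read-yet case, compiles down to the two VMREADs guarded by
   a single test of fVmcsFieldsRead. */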
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
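/* Illustrative sketch, with made-up capability values rather than ones from any real CPU:
   if the entry-controls MSR reported allowed0=0x000011ff and allowed1=0x0005ffff, fVal would
   start as 0x000011ff (must-be-one bits) and fZap as 0x0005ffff (bits allowed to be one).
   Setting a bit outside fZap, say 0x00100000, makes (fVal & fZap) != fVal, which is exactly
   what the sanity check further down reports as an unsupported feature combination. */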
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set the "save debug controls" as the converse
1548 * "load debug controls" is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
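/* Worked example with invented numbers: a guest TPR of 0x58 is priority class 5 and a
   pending vector of 0x41 is class 4. Since 4 <= 5 the threshold becomes 4, so VT-x raises
   a TPR-below-threshold VM-exit as soon as the guest drops its TPR class below 4, at which
   point the pending interrupt can finally be delivered. */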
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1728 }
1729 }
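/* Illustrative guest sequences (not tied to any particular guest): the instruction right
   after a "mov ss, ax" runs with interrupts and single-step #DBs inhibited and is reported
   above as BLOCK_MOVSS, while the instruction right after an "sti" that set EFLAGS.IF is
   reported as BLOCK_STI. */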
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet. */
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is
1812 no need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
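/* Illustrative example with a made-up value: a real-mode guest eflags of 0x00003202
   (IF=1, IOPL=3) is stashed in RealMode.Eflags and handed to VT-x as 0x00020202,
   i.e. with VM set and IOPL forced to 0 by the two statements above. */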
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * calling the import VMCS code which is currently performing the guest MSR reads
1868 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1869 * and the rest of the VMX leave session machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded hence we don't need to bother about
2020 * sharing FPU related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
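/* For illustration only, using typical rather than guaranteed values: many CPUs report
   CR0 fixed0 = 0x80000021 (PG, NE, PE) and CR0 fixed1 = 0xffffffff. With unrestricted
   guest execution PE and PG are stripped from the must-be-one set above, leaving NE as
   the only bit forced into the guest CR0 further down. */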
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2084 * using CR0.TS.
2085 *
2086 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
2087 * set on the first CPUs to support VT-x, while nothing is said with regard to UX in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging, define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
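/* Tiny worked example of the point above: if the merged mask owns CR0.MP but the nested
   hypervisor's own mask did not, a nested-guest read of CR0.MP must still observe the
   nested-guest's real CR0.MP value, which is why the read-shadow below is derived with
   CPUMGetGuestVmxMaskedCr0 rather than taken verbatim from the nested-guest VMCS. */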
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
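/* Example of the resulting encoding, with an invented address: an EPT PML4 at host-physical
   0x12340000 yields an EPTP of 0x1234001e, i.e. memory type 6 (WB) in bits 2:0 and the
   page-walk length minus one (3) in bits 5:3, which is precisely what the two validation
   asserts below insist on. */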
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2249 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
2272
2273 /*
2274 * The guest's view of its CR3 is unblemished with nested paging when the
2275 * guest is using paging or we have unrestricted guest execution to handle
2276 * the guest when it's not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
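/* Illustration with a typical (not architecturally guaranteed) value: CR4 fixed0 is usually
   just 0x2000, i.e. CR4.VMXE must stay set for the guest as well, which the Assert on the
   final guest CR4 value below relies on. */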
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4.*/
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2387 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2466 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467 * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581 /* vmxHCExportGuestSegReg() writes the modified attribute values into the VMCS. We want what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650 /*
2651 * The way to tell whether this is really a null selector or just a selector that was
2652 * loaded with 0 in real-mode is the segment attributes. A selector loaded in real-mode
2653 * with the value 0 is valid and usable in protected-mode and we should -not- mark it
2654 * as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors loaded
2655 * in protected-mode have their attribute as 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
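/* 0xf3 decodes to: type=3 (read/write, accessed data), S=1 (code/data), DPL=3, P=1 -- the
   access rights VT-x expects for every segment while faking real mode under virtual-8086. */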
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
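    /* Note: the vector below is built with the VM-exit interruption-info bit-field macro while the
       remaining fields use the VM-entry macros; both layouts keep the vector in bits 7:0, so the
       resulting value is the same. */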
3037 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042}
3043
3044
3045/**
3046 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 */
3050DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3051{
3052 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3056 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3057}
3058
3059
3060/**
3061 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3106/**
3107 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the general-protection exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120
3121
3122/**
3123 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the stack exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3137
3138
3139/**
3140 * Fixes up attributes for the specified segment register.
3141 *
3142 * @param pVCpu The cross context virtual CPU structure.
3143 * @param pSelReg The segment register that needs fixing.
3144 * @param pszRegName The register name (for logging and assertions).
3145 */
3146static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3147{
3148 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3149
3150 /*
3151 * If VT-x marks the segment as unusable, most other bits remain undefined:
3152 * - For CS the L, D and G bits have meaning.
3153 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3154 * - For the remaining data segments no bits are defined.
3155 *
3156     * The present bit and the unusable bit have been observed to be set at the
3157 * same time (the selector was supposed to be invalid as we started executing
3158 * a V8086 interrupt in ring-0).
3159 *
3160     * What should be important for the rest of the VBox code is that the P bit is
3161     * cleared.  Some of the other VBox code recognizes the unusable bit, but
3162     * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
3163 * safe side here, we'll strip off P and other bits we don't care about. If
3164 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3165 *
3166 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3167 */
3168#ifdef VBOX_STRICT
3169 uint32_t const uAttr = pSelReg->Attr.u;
3170#endif
3171
3172 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3173 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3174 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3175
3176#ifdef VBOX_STRICT
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Disable(pVCpu);
3179# endif
3180 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3181# ifdef DEBUG_bird
3182 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3183 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3184 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3185# endif
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Enable(pVCpu);
3188# endif
3189 NOREF(uAttr);
3190#endif
3191 RT_NOREF2(pVCpu, pszRegName);
3192}
3193
3194
3195/**
3196 * Imports a guest segment register from the current VMCS into the guest-CPU
3197 * context.
3198 *
3199 * @param pVCpu The cross context virtual CPU structure.
3200 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3201 *
3202 * @remarks Called with interrupts and/or preemption disabled.
3203 */
3204template<uint32_t const a_iSegReg>
3205DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3206{
3207 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3208    /* Check that the macros we depend upon here and in the exporting parent function work: */
3209#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3210 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3211 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3212 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3216 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3217 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3218 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3219 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
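    /* The string literal in the call below packs the six segment names as "ES\0CS\0SS\0DS\0FS\0GS";
       each name plus its terminator occupies 3 bytes, so 'a_iSegReg * 3' indexes the matching name. */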
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
3239
3240
3241/**
3242 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the current VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3299 *
3300 * @returns The RIP value.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 *
3303 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3304 * @remarks Do -not- call this function directly!
3305 */
3306DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3310 AssertRC(rc);
3311
3312 pVCpu->cpum.GstCtx.rip = u64Val;
3313
3314 return u64Val;
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3332 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3333 }
3334}
3335
3336
3337/**
3338 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3339 *
3340 * @param pVCpu The cross context virtual CPU structure.
3341 * @param pVmcsInfo The VMCS info. object.
3342 *
3343 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3344 * @remarks Do -not- call this function directly!
3345 */
3346DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3347{
3348 uint64_t fRFlags;
3349 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3350 AssertRC(rc);
3351
3352 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3353 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3354
3355 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3356#ifndef IN_NEM_DARWIN
3357 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3358 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3359    { /* most likely */ }
3360 else
3361 {
3362 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3363 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368}
3369
3370
3371/**
3372 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcsInfo The VMCS info. object.
3376 *
3377 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3378 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3379 * instead!!!
3380 */
3381DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3382{
3383 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3384 {
3385 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3386 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3387 }
3388}
3389
3390
3391/**
3392 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3393 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3394 */
3395DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3396{
3397 /*
3398 * We must import RIP here to set our EM interrupt-inhibited state.
3399 * We also import RFLAGS as our code that evaluates pending interrupts
3400 * before VM-entry requires it.
3401 */
3402 vmxHCImportGuestRip(pVCpu);
3403 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3404
3405 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3406 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3407 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3408 pVCpu->cpum.GstCtx.rip);
3409 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3410}
3411
3412
3413/**
3414 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3415 * context.
3416 *
3417 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3418 *
3419 * @param pVCpu The cross context virtual CPU structure.
3420 * @param pVmcsInfo The VMCS info. object.
3421 *
3422 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3423 * do not log!
3424 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3425 * instead!!!
3426 */
3427DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3428{
3429 uint32_t u32Val;
3430 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
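    /* Common case: no interruptibility bits are set, so we can clear any stale interrupt shadow and
       NMI inhibition without importing RIP/RFLAGS (the slow path below needs both). */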
3431 if (!u32Val)
3432 {
3433 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3434 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3435 }
3436 else
3437 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3438}
3439
3440
3441/**
3442 * Worker for VMXR0ImportStateOnDemand.
3443 *
3444 * @returns VBox status code.
3445 * @param pVCpu The cross context virtual CPU structure.
3446 * @param pVmcsInfo The VMCS info. object.
3447 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3448 */
3449static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3450{
3451 int rc = VINF_SUCCESS;
3452 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3453 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3454 uint32_t u32Val;
3455
3456 /*
3457     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3458 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3459 * neither are other host platforms.
3460 *
3461     * Committing this temporarily as it prevents the BSOD.
3462 *
3463 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3464 */
3465#ifdef RT_OS_WINDOWS
3466 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3467 return VERR_HM_IPE_1;
3468#endif
3469
3470 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3471
3472#ifndef IN_NEM_DARWIN
3473 /*
3474 * We disable interrupts to make the updating of the state and in particular
3475     * the fExtrn modification atomic wrt preemption hooks.
3476 */
3477 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3478#endif
3479
3480 fWhat &= pCtx->fExtrn;
3481 if (fWhat)
3482 {
3483 do
3484 {
3485 if (fWhat & CPUMCTX_EXTRN_RIP)
3486 vmxHCImportGuestRip(pVCpu);
3487
3488 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3489 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3490
3491 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3492 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3493 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3494
3495 if (fWhat & CPUMCTX_EXTRN_RSP)
3496 {
3497 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3498 AssertRC(rc);
3499 }
3500
3501 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3502 {
3503 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3504#ifndef IN_NEM_DARWIN
3505 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3506#else
3507 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3508#endif
3509 if (fWhat & CPUMCTX_EXTRN_CS)
3510 {
3511 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3512 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3513 if (fRealOnV86Active)
3514 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3515 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3516 }
3517 if (fWhat & CPUMCTX_EXTRN_SS)
3518 {
3519 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3520 if (fRealOnV86Active)
3521 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3522 }
3523 if (fWhat & CPUMCTX_EXTRN_DS)
3524 {
3525 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3526 if (fRealOnV86Active)
3527 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3528 }
3529 if (fWhat & CPUMCTX_EXTRN_ES)
3530 {
3531 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3532 if (fRealOnV86Active)
3533 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3534 }
3535 if (fWhat & CPUMCTX_EXTRN_FS)
3536 {
3537 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3538 if (fRealOnV86Active)
3539 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3540 }
3541 if (fWhat & CPUMCTX_EXTRN_GS)
3542 {
3543 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3544 if (fRealOnV86Active)
3545 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3546 }
3547 }
3548
3549 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3550 {
3551 if (fWhat & CPUMCTX_EXTRN_LDTR)
3552 vmxHCImportGuestLdtr(pVCpu);
3553
3554 if (fWhat & CPUMCTX_EXTRN_GDTR)
3555 {
3556 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3557 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3558 pCtx->gdtr.cbGdt = u32Val;
3559 }
3560
3561 /* Guest IDTR. */
3562 if (fWhat & CPUMCTX_EXTRN_IDTR)
3563 {
3564 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3565 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3566 pCtx->idtr.cbIdt = u32Val;
3567 }
3568
3569 /* Guest TR. */
3570 if (fWhat & CPUMCTX_EXTRN_TR)
3571 {
3572#ifndef IN_NEM_DARWIN
3573 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3574                   so we don't need to import that one. */
3575 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3576#endif
3577 vmxHCImportGuestTr(pVCpu);
3578 }
3579 }
3580
3581 if (fWhat & CPUMCTX_EXTRN_DR7)
3582 {
3583#ifndef IN_NEM_DARWIN
3584 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3585#endif
3586 {
3587 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3588 AssertRC(rc);
3589 }
3590 }
3591
3592 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3593 {
3594 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3595 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3596 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3597 pCtx->SysEnter.cs = u32Val;
3598 }
3599
3600#ifndef IN_NEM_DARWIN
3601 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3602 {
3603 if ( pVM->hmr0.s.fAllow64BitGuests
3604 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3605 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3606 }
3607
3608 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3609 {
3610 if ( pVM->hmr0.s.fAllow64BitGuests
3611 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3612 {
3613 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3614 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3615 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3616 }
3617 }
3618
3619 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3620 {
3621 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3622 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3623 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3624 Assert(pMsrs);
3625 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3626 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3627 for (uint32_t i = 0; i < cMsrs; i++)
3628 {
3629 uint32_t const idMsr = pMsrs[i].u32Msr;
3630 switch (idMsr)
3631 {
3632 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3633 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3634 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3635 default:
3636 {
3637 uint32_t idxLbrMsr;
3638 if (VM_IS_VMX_LBR(pVM))
3639 {
3640 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3641 {
3642 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3643 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3644 break;
3645 }
3646 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3647 {
3648                                Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3649 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3650 break;
3651 }
3652 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3653 {
3654 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3655 break;
3656 }
3657 /* Fallthru (no break) */
3658 }
3659 pCtx->fExtrn = 0;
3660 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3661 ASMSetFlags(fEFlags);
3662 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3663 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3664 }
3665 }
3666 }
3667 }
3668#endif
3669
3670 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3671 {
3672 if (fWhat & CPUMCTX_EXTRN_CR0)
3673 {
3674 uint64_t u64Cr0;
3675 uint64_t u64Shadow;
3676 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3677 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3678#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3679 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3680 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3681#else
3682 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3683 {
3684 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3685 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3686 }
3687 else
3688 {
3689 /*
3690 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3691 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3692 * re-construct CR0. See @bugref{9180#c95} for details.
3693 */
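                    /* Rough breakdown of the OR below: bits outside the merged mask come straight from
                       the hardware VMCS value, bits the nested hypervisor intercepts come from the
                       nested-guest VMCS, and bits only we intercept come from the CR0 read shadow. */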
3694 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3695 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3696 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3697 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3698 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3699 }
3700#endif
3701#ifndef IN_NEM_DARWIN
3702 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3703#endif
3704 CPUMSetGuestCR0(pVCpu, u64Cr0);
3705#ifndef IN_NEM_DARWIN
3706 VMMRZCallRing3Enable(pVCpu);
3707#endif
3708 }
3709
3710 if (fWhat & CPUMCTX_EXTRN_CR4)
3711 {
3712 uint64_t u64Cr4;
3713 uint64_t u64Shadow;
3714 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3715 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3716#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3717 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3718 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3719#else
3720 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3721 {
3722 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3723 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3724 }
3725 else
3726 {
3727 /*
3728 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3729 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3730 * re-construct CR4. See @bugref{9180#c95} for details.
3731 */
3732 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3733 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3734 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3735 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3736 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3737 }
3738#endif
3739 pCtx->cr4 = u64Cr4;
3740 }
3741
3742 if (fWhat & CPUMCTX_EXTRN_CR3)
3743 {
3744 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3745 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3746 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3747 && CPUMIsGuestPagingEnabledEx(pCtx)))
3748 {
3749 uint64_t u64Cr3;
3750 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3751 if (pCtx->cr3 != u64Cr3)
3752 {
3753 pCtx->cr3 = u64Cr3;
3754 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3755 }
3756
3757 /*
3758 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3759 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3760 */
3761 if (CPUMIsGuestInPAEModeEx(pCtx))
3762 {
3763 X86PDPE aPaePdpes[4];
3764 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3765 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3766 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3767 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3768 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3769 {
3770 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3771 /* PGM now updates PAE PDPTEs while updating CR3. */
3772 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3773 }
3774 }
3775 }
3776 }
3777 }
3778
3779#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3780 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3781 {
3782 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3783 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3784 {
3785 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3786 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3787 if (RT_SUCCESS(rc))
3788 { /* likely */ }
3789 else
3790 break;
3791 }
3792 }
3793#endif
3794 } while (0);
3795
3796 if (RT_SUCCESS(rc))
3797 {
3798 /* Update fExtrn. */
3799 pCtx->fExtrn &= ~fWhat;
3800
3801 /* If everything has been imported, clear the HM keeper bit. */
3802 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3803 {
3804#ifndef IN_NEM_DARWIN
3805 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3806#else
3807 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3808#endif
3809 Assert(!pCtx->fExtrn);
3810 }
3811 }
3812 }
3813#ifndef IN_NEM_DARWIN
3814 else
3815 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3816
3817 /*
3818 * Restore interrupts.
3819 */
3820 ASMSetFlags(fEFlags);
3821#endif
3822
3823 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3824
3825 if (RT_SUCCESS(rc))
3826 { /* likely */ }
3827 else
3828 return rc;
3829
3830 /*
3831 * Honor any pending CR3 updates.
3832 *
3833 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3834 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3835 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3836 *
3837 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3838 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3839 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3840 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3841 *
3842 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3843 *
3844 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3845 */
3846 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3847#ifndef IN_NEM_DARWIN
3848 && VMMRZCallRing3IsEnabled(pVCpu)
3849#endif
3850 )
3851 {
3852 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3853 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3854 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3855 }
3856
3857 return VINF_SUCCESS;
3858}
3859
3860
3861/**
3862 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3863 *
3864 * @returns VBox status code.
3865 * @param pVCpu The cross context virtual CPU structure.
3866 * @param pVmcsInfo The VMCS info. object.
3867 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3868 * in NEM/darwin context.
3869 * @tparam a_fWhat What to import, zero or more bits from
3870 * HMVMX_CPUMCTX_EXTRN_ALL.
3871 */
3872template<uint64_t const a_fWhat>
3873static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3874{
3875 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3876 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3877 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3878 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3879
3880 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3881
3882 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3883
3884    /* RIP and RFLAGS may have been imported already by the post-exit code
3885       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3886       this part of the code is skipped. */
3887 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3888 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3889 {
3890 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3891 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3892
3893 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3894 {
3895 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3896 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3897 else
3898 vmxHCImportGuestCoreRip(pVCpu);
3899 }
3900 }
3901
3902 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3903 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3904 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3905
3906 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3907 {
3908 if (a_fWhat & CPUMCTX_EXTRN_CS)
3909 {
3910 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3911 /** @todo try get rid of this carp, it smells and is probably never ever
3912 * used: */
3913 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3914 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3915 {
3916 vmxHCImportGuestCoreRip(pVCpu);
3917 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3918 }
3919 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3920 }
3921 if (a_fWhat & CPUMCTX_EXTRN_SS)
3922 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3923 if (a_fWhat & CPUMCTX_EXTRN_DS)
3924 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3925 if (a_fWhat & CPUMCTX_EXTRN_ES)
3926 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3927 if (a_fWhat & CPUMCTX_EXTRN_FS)
3928 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3929 if (a_fWhat & CPUMCTX_EXTRN_GS)
3930 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3931
3932 /* Guest TR.
3933 Real-mode emulation using virtual-8086 mode has the fake TSS
3934       (pRealModeTSS) in TR, so we don't need to import that one. */
3935#ifndef IN_NEM_DARWIN
3936 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3937 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3938 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3939#else
3940 if (a_fWhat & CPUMCTX_EXTRN_TR)
3941#endif
3942 vmxHCImportGuestTr(pVCpu);
3943
3944#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3945 if (fRealOnV86Active)
3946 {
3947 if (a_fWhat & CPUMCTX_EXTRN_CS)
3948 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3949 if (a_fWhat & CPUMCTX_EXTRN_SS)
3950 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3951 if (a_fWhat & CPUMCTX_EXTRN_DS)
3952 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3953 if (a_fWhat & CPUMCTX_EXTRN_ES)
3954 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3955 if (a_fWhat & CPUMCTX_EXTRN_FS)
3956 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3957 if (a_fWhat & CPUMCTX_EXTRN_GS)
3958 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3959 }
3960#endif
3961 }
3962
3963 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3964 {
3965 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3966 AssertRC(rc);
3967 }
3968
3969 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3970 vmxHCImportGuestLdtr(pVCpu);
3971
3972 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3973 {
3974 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3975 uint32_t u32Val;
3976 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3977 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3978 }
3979
3980 /* Guest IDTR. */
3981 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3982 {
3983 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3984 uint32_t u32Val;
3985 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3986 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
3987 }
3988
3989 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3990 {
3991#ifndef IN_NEM_DARWIN
3992 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3993#endif
3994 {
3995 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3996 AssertRC(rc);
3997 }
3998 }
3999
4000 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4001 {
4002 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4003 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4004 uint32_t u32Val;
4005 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4006 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4007 }
4008
4009#ifndef IN_NEM_DARWIN
4010 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4011 {
4012 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4013 && pVM->hmr0.s.fAllow64BitGuests)
4014 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4015 }
4016
4017 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4018 {
4019 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4020 && pVM->hmr0.s.fAllow64BitGuests)
4021 {
4022 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4023 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4024 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4025 }
4026 }
4027
4028 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4029 {
4030 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4031 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4032 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4033 Assert(pMsrs);
4034 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4035 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4036 for (uint32_t i = 0; i < cMsrs; i++)
4037 {
4038 uint32_t const idMsr = pMsrs[i].u32Msr;
4039 switch (idMsr)
4040 {
4041 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4042 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4043 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4044 default:
4045 {
4046 uint32_t idxLbrMsr;
4047 if (VM_IS_VMX_LBR(pVM))
4048 {
4049 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4050 {
4051 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4052 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4053 break;
4054 }
4055 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4056 {
4057                        Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4058 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4059 break;
4060 }
4061 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4062 {
4063 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4064 break;
4065 }
4066 }
4067 pVCpu->cpum.GstCtx.fExtrn = 0;
4068 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4069 ASMSetFlags(fEFlags);
4070 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4071 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4072 }
4073 }
4074 }
4075 }
4076#endif
4077
4078 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4079 {
4080 uint64_t u64Cr0;
4081 uint64_t u64Shadow;
4082 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4083 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4084#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4085 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4086 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4087#else
4088 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4089 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4090 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4091 else
4092 {
4093 /*
4094 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4095 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4096 * re-construct CR0. See @bugref{9180#c95} for details.
4097 */
4098 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4099 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4100 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
4101 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4102 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4103 Assert(u64Cr0 & X86_CR0_NE);
4104 }
4105#endif
4106#ifndef IN_NEM_DARWIN
4107 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4108#endif
4109 CPUMSetGuestCR0(pVCpu, u64Cr0);
4110#ifndef IN_NEM_DARWIN
4111 VMMRZCallRing3Enable(pVCpu);
4112#endif
4113 }
4114
4115 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4116 {
4117 uint64_t u64Cr4;
4118 uint64_t u64Shadow;
4119 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4120 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4121#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4122 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4123 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4124#else
4125 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4126 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4127 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4128 else
4129 {
4130 /*
4131 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4132 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4133 * re-construct CR4. See @bugref{9180#c95} for details.
4134 */
4135 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4136 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4137 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
4138 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4139 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4140 Assert(u64Cr4 & X86_CR4_VMXE);
4141 }
4142#endif
4143 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4144 }
4145
4146 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4147 {
4148 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4149 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4150 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4151 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4152 {
4153 uint64_t u64Cr3;
4154 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4155 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4156 {
4157 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4158 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4159 }
4160
4161 /*
4162 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4163 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4164 */
4165 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4166 {
4167 X86PDPE aPaePdpes[4];
4168 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4169 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4170 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4171 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4172 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4173 {
4174 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4175 /* PGM now updates PAE PDPTEs while updating CR3. */
4176 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4177 }
4178 }
4179 }
4180 }
4181
4182#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4183 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4184 {
4185 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4186 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4187 {
4188 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4189 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4190 AssertRCReturn(rc, rc);
4191 }
4192 }
4193#endif
4194
4195 /* Update fExtrn. */
4196 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4197
4198 /* If everything has been imported, clear the HM keeper bit. */
4199 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4200 {
4201#ifndef IN_NEM_DARWIN
4202 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4203#else
4204 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4205#endif
4206 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4207 }
4208
4209 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4210
4211 /*
4212 * Honor any pending CR3 updates.
4213 *
4214 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4215 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4216 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4217 *
4218 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4219 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4220 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4221 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4222 *
4223 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4224 *
4225 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4226 */
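    /* The inverted conditions below only pick a branch-prediction hint: when CR3 wasn't part of a_fWhat
       the force-flag is almost certainly clear and we return early; we only fall through to
       PGMUpdateCR3() when the flag is set (and, outside NEM/darwin, ring-3 calls are enabled). */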
4227#ifndef IN_NEM_DARWIN
4228 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4229 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4230 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4231 return VINF_SUCCESS;
4232 ASMSetFlags(fEFlags);
4233#else
4234 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4235 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4236 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4237 return VINF_SUCCESS;
4238 RT_NOREF_PV(fEFlags);
4239#endif
4240
4241 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4242 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4243 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4244 return VINF_SUCCESS;
4245}
4246
4247
4248/**
4249 * Internal state fetcher.
4250 *
4251 * @returns VBox status code.
4252 * @param pVCpu The cross context virtual CPU structure.
4253 * @param pVmcsInfo The VMCS info. object.
4254 * @param pszCaller For logging.
4255 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4256 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4257 * already. This is ORed together with @a a_fWhat when
4258 * calculating what needs fetching (just for safety).
4259 * @tparam a_fDonePostExit   What's ASSUMED to have been retrieved by
4260 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4261 * already. This is ORed together with @a a_fWhat when
4262 * calculating what needs fetching (just for safety).
4263 */
4264template<uint64_t const a_fWhat,
4265 uint64_t const a_fDoneLocal = 0,
4266 uint64_t const a_fDonePostExit = 0
4267#ifndef IN_NEM_DARWIN
4268 | CPUMCTX_EXTRN_INHIBIT_INT
4269 | CPUMCTX_EXTRN_INHIBIT_NMI
4270# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4271 | HMVMX_CPUMCTX_EXTRN_ALL
4272# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4273 | CPUMCTX_EXTRN_RFLAGS
4274# endif
4275#else /* IN_NEM_DARWIN */
4276 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4277#endif /* IN_NEM_DARWIN */
4278>
4279DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4280{
4281 RT_NOREF_PV(pszCaller);
4282 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4283 {
4284#ifndef IN_NEM_DARWIN
4285 /*
4286 * We disable interrupts to make the updating of the state and in particular
4287         * the fExtrn modification atomic wrt preemption hooks.
4288 */
4289 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4290#else
4291 RTCCUINTREG const fEFlags = 0;
4292#endif
4293
4294 /*
4295 * We combine all three parameters and take the (probably) inlined optimized
4296 * code path for the new things specified in a_fWhat.
4297 *
4298 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4299 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4300 * also take the streamlined path when both of these are cleared in fExtrn
4301 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4302 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4303 */
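        /* Concretely: fWhatToDo is the subset of the requested bits still marked external in fExtrn.  We
           take the inlined path when it matches exactly what the template asked for, or that minus
           RIP/RFLAGS (which the interrupt-inhibit import may already have pulled in). */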
4304 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4305 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4306 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4307 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4308 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4309 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4310 {
4311 int const rc = vmxHCImportGuestStateInner< a_fWhat
4312 & HMVMX_CPUMCTX_EXTRN_ALL
4313 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4314#ifndef IN_NEM_DARWIN
4315 ASMSetFlags(fEFlags);
4316#endif
4317 return rc;
4318 }
4319
4320#ifndef IN_NEM_DARWIN
4321 ASMSetFlags(fEFlags);
4322#endif
4323
4324 /*
4325 * We shouldn't normally get here, but it may happen when executing
4326 * in the debug run-loops. Typically, everything should already have
4327 * been fetched then. Otherwise call the fallback state import function.
4328 */
4329 if (fWhatToDo == 0)
4330 { /* hope the cause was the debug loop or something similar */ }
4331 else
4332 {
4333 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4334 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4335 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4336 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4337 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4338 }
4339 }
4340 return VINF_SUCCESS;
4341}
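/* Typical use (illustrative only): an exit handler fetching everything still marked external would do
 * something like
 *      int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
 * while handlers that only need a few registers narrow a_fWhat and list what the post-exit code has
 * already fetched via a_fDonePostExit. */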
4342
4343
4344/**
4345 * Check per-VM and per-VCPU force flag actions that require us to go back to
4346 * ring-3 for one reason or another.
4347 *
4348 * @returns Strict VBox status code (i.e. informational status codes too)
4349 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4350 * ring-3.
4351 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4352 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4353 * interrupts)
4354 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4355 * all EMTs to be in ring-3.
4356 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
4357 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4358 * to the EM loop.
4359 *
4360 * @param pVCpu The cross context virtual CPU structure.
4361 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4362 * @param fStepping Whether we are single-stepping the guest using the
4363 * hypervisor debugger.
4364 *
4365 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4366 * is no longer in VMX non-root mode.
4367 */
4368static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4369{
4370#ifndef IN_NEM_DARWIN
4371 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4372#endif
4373
4374 /*
4375 * Update pending interrupts into the APIC's IRR.
4376 */
4377 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4378 APICUpdatePendingInterrupts(pVCpu);
4379
4380 /*
4381 * Anything pending? Should be more likely than not if we're doing a good job.
4382 */
4383 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4384 if ( !fStepping
4385 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4386 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4387 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4388 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4389 return VINF_SUCCESS;
4390
4391    /* Pending PGM CR3 sync. */
4392    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4393 {
4394 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4395 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4396 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4397 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4398 if (rcStrict != VINF_SUCCESS)
4399 {
4400 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4401 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4402 return rcStrict;
4403 }
4404 }
4405
4406 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4407 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4408 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4409 {
4410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4411 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4412 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4413 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4414 return rc;
4415 }
4416
4417 /* Pending VM request packets, such as hardware interrupts. */
4418 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4419 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4420 {
4421 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4422 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4423 return VINF_EM_PENDING_REQUEST;
4424 }
4425
4426 /* Pending PGM pool flushes. */
4427 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4428 {
4429 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4430 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4431 return VINF_PGM_POOL_FLUSH_PENDING;
4432 }
4433
4434 /* Pending DMA requests. */
4435 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4436 {
4437 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4438 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4439 return VINF_EM_RAW_TO_R3;
4440 }
4441
4442#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4443 /*
4444 * Pending nested-guest events.
4445 *
4446     * Please note that the priority of these events is specified and important.
4447 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4448 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4449 */
4450 if (fIsNestedGuest)
4451 {
4452 /* Pending nested-guest APIC-write. */
4453 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4454 {
4455 Log4Func(("Pending nested-guest APIC-write\n"));
4456 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4457 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4458 return rcStrict;
4459 }
4460
4461 /* Pending nested-guest monitor-trap flag (MTF). */
4462 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4463 {
4464 Log4Func(("Pending nested-guest MTF\n"));
4465 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4466 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4467 return rcStrict;
4468 }
4469
4470 /* Pending nested-guest VMX-preemption timer expired. */
4471 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4472 {
4473 Log4Func(("Pending nested-guest preempt timer\n"));
4474 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4475 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4476 return rcStrict;
4477 }
4478 }
4479#else
4480 NOREF(fIsNestedGuest);
4481#endif
4482
4483 return VINF_SUCCESS;
4484}
4485
4486
4487/**
4488 * Converts any TRPM trap into a pending HM event. This is typically used when
4489 * entering from ring-3 (not longjmp returns).
4490 *
4491 * @param pVCpu The cross context virtual CPU structure.
4492 */
4493static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4494{
4495 Assert(TRPMHasTrap(pVCpu));
4496 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4497
4498 uint8_t uVector;
4499 TRPMEVENT enmTrpmEvent;
4500 uint32_t uErrCode;
4501 RTGCUINTPTR GCPtrFaultAddress;
4502 uint8_t cbInstr;
4503 bool fIcebp;
4504
4505 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4506 AssertRC(rc);
4507
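    /* Build the event in the interruption-information format shared by the VM-entry and
       IDT-vectoring fields: vector in bits 7:0, event type in bits 10:8 (supplied by
       HMTrpmEventTypeToVmxEventType) and the valid bit in bit 31. */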
4508 uint32_t u32IntInfo;
4509 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4510 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4511
4512 rc = TRPMResetTrap(pVCpu);
4513 AssertRC(rc);
4514 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4515 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4516
4517 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4518}
4519
4520
4521/**
4522 * Converts the pending HM event into a TRPM trap.
4523 *
4524 * @param pVCpu The cross context virtual CPU structure.
4525 */
4526static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4527{
4528 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4529
4530 /* If a trap was already pending, we did something wrong! */
4531 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4532
4533 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4534 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4535 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4536
4537 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4538
4539 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4540 AssertRC(rc);
4541
4542 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4543 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4544
4545 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4546 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4547 else
4548 {
4549 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4550 switch (uVectorType)
4551 {
4552 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4553 TRPMSetTrapDueToIcebp(pVCpu);
4554 RT_FALL_THRU();
4555 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4556 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4557 {
4558 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4559 || ( uVector == X86_XCPT_BP /* INT3 */
4560 || uVector == X86_XCPT_OF /* INTO */
4561 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4562 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4563 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4564 break;
4565 }
4566 }
4567 }
4568
4569 /* We're now done converting the pending event. */
4570 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4571}
4572
4573
4574/**
4575 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4576 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4577 *
4578 * @param pVCpu The cross context virtual CPU structure.
4579 * @param pVmcsInfo The VMCS info. object.
4580 */
4581static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4582{
4583 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4584 {
4585 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4586 {
4587 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4588 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4589 AssertRC(rc);
4590 }
4591    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4592}
4593
4594
4595/**
4596 * Clears the interrupt-window exiting control in the VMCS.
4597 *
4598 * @param pVCpu The cross context virtual CPU structure.
4599 * @param pVmcsInfo The VMCS info. object.
4600 */
4601DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4602{
4603 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4604 {
4605 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4606 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4607 AssertRC(rc);
4608 }
4609}
4610
4611
4612/**
4613 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4614 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4615 *
4616 * @param pVCpu The cross context virtual CPU structure.
4617 * @param pVmcsInfo The VMCS info. object.
4618 */
4619static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4620{
4621 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4622 {
4623 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4624 {
4625 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4627 AssertRC(rc);
4628 Log4Func(("Setup NMI-window exiting\n"));
4629 }
4630 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4631}
4632
4633
4634/**
4635 * Clears the NMI-window exiting control in the VMCS.
4636 *
4637 * @param pVCpu The cross context virtual CPU structure.
4638 * @param pVmcsInfo The VMCS info. object.
4639 */
4640DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4641{
4642 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4643 {
4644 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4645 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4646 AssertRC(rc);
4647 }
4648}
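/* The Set*WindowExitVmcs helpers above are armed from vmxHCEvaluatePendingEvent() below when an
   event cannot be delivered yet; the Clear variants are presumably used by the corresponding
   interrupt/NMI-window VM-exit handlers elsewhere in this file. */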
4649
4650
4651/**
4652 * Injects an event into the guest upon VM-entry by updating the relevant fields
4653 * in the VM-entry area in the VMCS.
4654 *
4655 * @returns Strict VBox status code (i.e. informational status codes too).
4656 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4657 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4658 *
4659 * @param pVCpu The cross context virtual CPU structure.
4660 * @param pVmcsInfo The VMCS info object.
4661 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4662 * @param pEvent The event being injected.
4663 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4664 *                      will be updated if necessary. This must not be NULL.
4665 * @param fStepping Whether we're single-stepping guest execution and should
4666 * return VINF_EM_DBG_STEPPED if the event is injected
4667 * directly (registers modified by us, not by hardware on
4668 * VM-entry).
4669 */
4670static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4671 bool fStepping, uint32_t *pfIntrState)
4672{
4673 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4674 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4675 Assert(pfIntrState);
4676
4677#ifdef IN_NEM_DARWIN
4678 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4679#endif
4680
4681 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4682 uint32_t u32IntInfo = pEvent->u64IntInfo;
4683 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4684 uint32_t const cbInstr = pEvent->cbInstr;
4685 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4686 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4687 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4688
4689#ifdef VBOX_STRICT
4690 /*
4691 * Validate the error-code-valid bit for hardware exceptions.
4692 * No error codes for exceptions in real-mode.
4693 *
4694 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4695 */
4696 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4697 && !CPUMIsGuestInRealModeEx(pCtx))
4698 {
4699 switch (uVector)
4700 {
4701 case X86_XCPT_PF:
4702 case X86_XCPT_DF:
4703 case X86_XCPT_TS:
4704 case X86_XCPT_NP:
4705 case X86_XCPT_SS:
4706 case X86_XCPT_GP:
4707 case X86_XCPT_AC:
4708 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4709 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4710 RT_FALL_THRU();
4711 default:
4712 break;
4713 }
4714 }
4715
4716 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4717 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4718 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4719#endif
4720
4721 RT_NOREF(uVector);
4722 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4723 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4724 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4725 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4726 {
4727 Assert(uVector <= X86_XCPT_LAST);
4728 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4729 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4730 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4731 }
4732 else
4733 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4734
4735 /*
4736 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4737 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4738 * interrupt handler in the (real-mode) guest.
4739 *
4740 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4741 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4742 */
4743 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4744 {
4745#ifndef IN_NEM_DARWIN
4746 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4747#endif
4748 {
4749 /*
4750 * For CPUs with unrestricted guest execution enabled and with the guest
4751 * in real-mode, we must not set the deliver-error-code bit.
4752 *
4753 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4754 */
4755 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4756 }
4757#ifndef IN_NEM_DARWIN
4758 else
4759 {
4760 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4761 Assert(PDMVmmDevHeapIsEnabled(pVM));
4762 Assert(pVM->hm.s.vmx.pRealModeTSS);
4763 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4764
4765            /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
4766 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4767 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4768 AssertRCReturn(rc2, rc2);
4769
4770 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4771 size_t const cbIdtEntry = sizeof(X86IDTR16);
4772 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4773 {
4774 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4775 if (uVector == X86_XCPT_DF)
4776 return VINF_EM_RESET;
4777
4778 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4779 No error codes for exceptions in real-mode. */
4780 if (uVector == X86_XCPT_GP)
4781 {
4782 static HMEVENT const s_EventXcptDf
4783 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4784 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4785 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4786 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4787 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4788 }
4789
4790 /*
4791 * If we're injecting an event with no valid IDT entry, inject a #GP.
4792 * No error codes for exceptions in real-mode.
4793 *
4794 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4795 */
4796 static HMEVENT const s_EventXcptGp
4797 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4798 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4799 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4800 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4801 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4802 }
4803
4804 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4805 uint16_t uGuestIp = pCtx->ip;
4806 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4807 {
4808 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4809                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4810 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4811 }
4812 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4813 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4814
4815 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4816 X86IDTR16 IdtEntry;
4817 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4818 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4819 AssertRCReturn(rc2, rc2);
4820
4821 /* Construct the stack frame for the interrupt/exception handler. */
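            /* (The push order mirrors what a real-mode INT does: FLAGS, then CS, then IP.) */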
4822 VBOXSTRICTRC rcStrict;
4823 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4824 if (rcStrict == VINF_SUCCESS)
4825 {
4826 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4827 if (rcStrict == VINF_SUCCESS)
4828 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4829 }
4830
4831 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4832 if (rcStrict == VINF_SUCCESS)
4833 {
4834 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4835 pCtx->rip = IdtEntry.offSel;
4836 pCtx->cs.Sel = IdtEntry.uSel;
4837 pCtx->cs.ValidSel = IdtEntry.uSel;
4838 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
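                /* Note: cbIdtEntry is 4 (sizeof(X86IDTR16)), so the shift above is the usual
                   real-mode code segment base computation of selector << 4. */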
4839 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4840 && uVector == X86_XCPT_PF)
4841 pCtx->cr2 = GCPtrFault;
4842
4843 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4844 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4845 | HM_CHANGED_GUEST_RSP);
4846
4847 /*
4848 * If we delivered a hardware exception (other than an NMI) and if there was
4849 * block-by-STI in effect, we should clear it.
4850 */
4851 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4852 {
4853 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4854 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4855 Log4Func(("Clearing inhibition due to STI\n"));
4856 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4857 }
4858
4859 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4860 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4861
4862 /*
4863 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4864 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4865 */
4866 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4867
4868 /*
4869 * If we eventually support nested-guest execution without unrestricted guest execution,
4870 * we should set fInterceptEvents here.
4871 */
4872 Assert(!fIsNestedGuest);
4873
4874 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4875 if (fStepping)
4876 rcStrict = VINF_EM_DBG_STEPPED;
4877 }
4878 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4879 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4880 return rcStrict;
4881 }
4882#else
4883 RT_NOREF(pVmcsInfo);
4884#endif
4885 }
4886
4887 /*
4888 * Validate.
4889 */
4890 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4891 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4892
4893 /*
4894 * Inject the event into the VMCS.
4895 */
4896 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4897 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4898 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4899 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4900 AssertRC(rc);
4901
4902 /*
4903 * Update guest CR2 if this is a page-fault.
4904 */
4905 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4906 pCtx->cr2 = GCPtrFault;
4907
4908 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4909 return VINF_SUCCESS;
4910}
4911
4912
4913/**
4914 * Evaluates the event to be delivered to the guest and sets it as the pending
4915 * event.
4916 *
4917 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4918 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4919 * NOT restore these force-flags.
4920 *
4921 * @returns Strict VBox status code (i.e. informational status codes too).
4922 * @param pVCpu The cross context virtual CPU structure.
4923 * @param pVmcsInfo The VMCS information structure.
4924 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4925 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4926 */
4927static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4928{
4929 Assert(pfIntrState);
4930 Assert(!TRPMHasTrap(pVCpu));
4931
4932 /*
4933 * Compute/update guest-interruptibility state related FFs.
4934 * The FFs will be used below while evaluating events to be injected.
4935 */
4936 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4937
4938 /*
4939 * Evaluate if a new event needs to be injected.
4940 * An event that's already pending has already performed all necessary checks.
4941 */
4942 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4943 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4944 {
4945 /** @todo SMI. SMIs take priority over NMIs. */
4946
4947 /*
4948 * NMIs.
4949 * NMIs take priority over external interrupts.
4950 */
4951#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4952 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4953#endif
4954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4955 {
4956 /*
4957 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4958 *
4959 * For a nested-guest, the FF always indicates the outer guest's ability to
4960 * receive an NMI while the guest-interruptibility state bit depends on whether
4961 * the nested-hypervisor is using virtual-NMIs.
4962 */
4963 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4964 {
4965#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4966 if ( fIsNestedGuest
4967 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4968 return IEMExecVmxVmexitXcptNmi(pVCpu);
4969#endif
4970 vmxHCSetPendingXcptNmi(pVCpu);
4971 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4972 Log4Func(("NMI pending injection\n"));
4973
4974 /* We've injected the NMI, bail. */
4975 return VINF_SUCCESS;
4976 }
4977 if (!fIsNestedGuest)
4978 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4979 }
4980
4981 /*
4982 * External interrupts (PIC/APIC).
4983 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4984 * We cannot re-request the interrupt from the controller again.
4985 */
4986 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4987 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4988 {
4989 Assert(!DBGFIsStepping(pVCpu));
4990 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4991 AssertRC(rc);
4992
4993 /*
4994 * We must not check EFLAGS directly when executing a nested-guest, use
4995 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4996 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4997 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4998 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4999 *
5000 * See Intel spec. 25.4.1 "Event Blocking".
5001 */
5002 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
5003 {
5004#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5005 if ( fIsNestedGuest
5006 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5007 {
5008 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5009 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5010 return rcStrict;
5011 }
5012#endif
5013 uint8_t u8Interrupt;
5014 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5015 if (RT_SUCCESS(rc))
5016 {
5017#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5018 if ( fIsNestedGuest
5019 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5020 {
5021 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5022 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5023 return rcStrict;
5024 }
5025#endif
5026 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5027 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5028 }
5029 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5030 {
5031 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5032
5033 if ( !fIsNestedGuest
5034 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5035 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5036 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
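                    /* The (u8Interrupt >> 4) above is the interrupt's priority class (vector bits 7:4),
                       which is what the hardware compares the TPR threshold against. */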
5037
5038 /*
5039 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5040 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5041 * need to re-set this force-flag here.
5042 */
5043 }
5044 else
5045 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5046
5047 /* We've injected the interrupt or taken necessary action, bail. */
5048 return VINF_SUCCESS;
5049 }
5050 if (!fIsNestedGuest)
5051 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5052 }
5053 }
5054 else if (!fIsNestedGuest)
5055 {
5056 /*
5057 * An event is being injected or we are in an interrupt shadow. Check if another event is
5058 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5059 * the pending event.
5060 */
5061 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5062 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5063 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5064 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5065 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5066 }
5067 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5068
5069 return VINF_SUCCESS;
5070}
5071
5072
5073/**
5074 * Injects any pending events into the guest if the guest is in a state to
5075 * receive them.
5076 *
5077 * @returns Strict VBox status code (i.e. informational status codes too).
5078 * @param pVCpu The cross context virtual CPU structure.
5079 * @param pVmcsInfo The VMCS information structure.
5080 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5081 * @param fIntrState The VT-x guest-interruptibility state.
5082 * @param fStepping Whether we are single-stepping the guest using the
5083 * hypervisor debugger and should return
5084 * VINF_EM_DBG_STEPPED if the event was dispatched
5085 * directly.
5086 */
5087static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5088 uint32_t fIntrState, bool fStepping)
5089{
5090 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5091#ifndef IN_NEM_DARWIN
5092 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5093#endif
5094
5095#ifdef VBOX_STRICT
5096 /*
5097 * Verify guest-interruptibility state.
5098 *
5099 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5100 * since injecting an event may modify the interruptibility state and we must thus always
5101 * use fIntrState.
5102 */
5103 {
5104 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5105 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5106 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5107 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5108 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5109 Assert(!TRPMHasTrap(pVCpu));
5110 NOREF(fBlockMovSS); NOREF(fBlockSti);
5111 }
5112#endif
5113
5114 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5115 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5116 {
5117 /*
5118 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5119 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5120 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5121 *
5122 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5123 */
5124 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5125#ifdef VBOX_STRICT
5126 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5127 {
5128 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5129 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5130 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5131 }
5132 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5133 {
5134 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5135 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5136 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5137 }
5138#endif
5139 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5140 uIntType));
5141
5142 /*
5143 * Inject the event and get any changes to the guest-interruptibility state.
5144 *
5145 * The guest-interruptibility state may need to be updated if we inject the event
5146 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5147 */
5148 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5149 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5150
5151 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5152 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5153 else
5154 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5155 }
5156
5157 /*
5158 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5159     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5160 */
5161 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5162 && !fIsNestedGuest)
5163 {
5164 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5165
5166 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5167 {
5168 /*
5169 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5170 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5171 */
5172 Assert(!DBGFIsStepping(pVCpu));
5173 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5174 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5175 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5176 AssertRC(rc);
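            /* BS (bit 14) is the single-step bit of the pending-debug-exceptions field; see also
               the checks on this bit in vmxHCCheckGuestState() below. */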
5177 }
5178 else
5179 {
5180 /*
5181 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5182 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5183             * we take care of this case in vmxHCExportSharedDebugState and also the case where
5184 * we use MTF, so just make sure it's called before executing guest-code.
5185 */
5186 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5187 }
5188 }
5189    /* else: for nested-guests, this is currently handled while merging controls. */
5190
5191 /*
5192 * Finally, update the guest-interruptibility state.
5193 *
5194 * This is required for the real-on-v86 software interrupt injection, for
5195 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5196 */
5197 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5198 AssertRC(rc);
5199
5200 /*
5201 * There's no need to clear the VM-entry interruption-information field here if we're not
5202 * injecting anything. VT-x clears the valid bit on every VM-exit.
5203 *
5204 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5205 */
5206
5207 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5208 return rcStrict;
5209}
5210
5211
5212/**
5213 * Tries to determine what part of the guest state VT-x has deemed invalid
5214 * and updates the error record fields accordingly.
5215 *
5216 * @returns VMX_IGS_* error codes.
5217 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5218 * wrong with the guest state.
5219 *
5220 * @param pVCpu The cross context virtual CPU structure.
5221 * @param pVmcsInfo The VMCS info. object.
5222 *
5223 * @remarks This function assumes our cache of the VMCS controls
5224 *          is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5225 */
5226static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5227{
5228#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5229#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
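/* These two helpers implement a "record the first failing check and bail" idiom: every check runs
   inside the do { ... } while (0) block below and the first one that fails stores its VMX_IGS_*
   code in uError and breaks out.  For example,
       HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
   expands to: if (!(pCtx->cs.Attr.n.u1Present)) { uError = VMX_IGS_CS_ATTR_P_INVALID; break; } else do { } while (0); */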
5230
5231 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5232 uint32_t uError = VMX_IGS_ERROR;
5233 uint32_t u32IntrState = 0;
5234#ifndef IN_NEM_DARWIN
5235 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5236 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5237#else
5238 bool const fUnrestrictedGuest = true;
5239#endif
5240 do
5241 {
5242 int rc;
5243
5244 /*
5245 * Guest-interruptibility state.
5246 *
5247         * Read this first so that any check that fails before those that actually
5248         * require the guest-interruptibility state still reflects the correct
5249         * VMCS value, avoiding further confusion.
5250 */
5251 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5252 AssertRC(rc);
5253
5254 uint32_t u32Val;
5255 uint64_t u64Val;
5256
5257 /*
5258 * CR0.
5259 */
5260 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5261 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5262 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
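        /* Per the VMX CR0 fixed-bit MSRs, a CR0 bit must be 1 if it is set in CR0_FIXED0 and may
           only be 1 if it is set in CR0_FIXED1.  Since a bit cannot be fixed to both values,
           FIXED0 should be a subset of FIXED1; ANDing them yields the must-be-one mask (fSetCr0)
           and ORing them the may-be-one mask (fZapCr0) without relying on that subset property. */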
5263 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5264 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5265 if (fUnrestrictedGuest)
5266 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5267
5268 uint64_t u64GuestCr0;
5269 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5270 AssertRC(rc);
5271 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5272 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5273 if ( !fUnrestrictedGuest
5274 && (u64GuestCr0 & X86_CR0_PG)
5275 && !(u64GuestCr0 & X86_CR0_PE))
5276 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5277
5278 /*
5279 * CR4.
5280 */
5281 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5282 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5283 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5284
5285 uint64_t u64GuestCr4;
5286 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5287 AssertRC(rc);
5288 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5289 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5290
5291 /*
5292 * IA32_DEBUGCTL MSR.
5293 */
5294 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5295 AssertRC(rc);
5296 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5297 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5298 {
5299 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5300 }
5301 uint64_t u64DebugCtlMsr = u64Val;
5302
5303#ifdef VBOX_STRICT
5304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5305 AssertRC(rc);
5306 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5307#endif
5308 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5309
5310 /*
5311 * RIP and RFLAGS.
5312 */
5313 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5314 AssertRC(rc);
5315        /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5316 if ( !fLongModeGuest
5317 || !pCtx->cs.Attr.n.u1Long)
5318 {
5319 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5320 }
5321 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5322 * must be identical if the "IA-32e mode guest" VM-entry
5323 * control is 1 and CS.L is 1. No check applies if the
5324 * CPU supports 64 linear-address bits. */
5325
5326 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5327 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5328 AssertRC(rc);
5329 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5330 VMX_IGS_RFLAGS_RESERVED);
5331 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5332 uint32_t const u32Eflags = u64Val;
5333
5334 if ( fLongModeGuest
5335 || ( fUnrestrictedGuest
5336 && !(u64GuestCr0 & X86_CR0_PE)))
5337 {
5338 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5339 }
5340
5341 uint32_t u32EntryInfo;
5342 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5343 AssertRC(rc);
5344 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5345 {
5346 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5347 }
5348
5349 /*
5350 * 64-bit checks.
5351 */
5352 if (fLongModeGuest)
5353 {
5354 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5355 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5356 }
5357
5358 if ( !fLongModeGuest
5359 && (u64GuestCr4 & X86_CR4_PCIDE))
5360 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5361
5362 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5363 * 51:32 beyond the processor's physical-address width are 0. */
5364
5365 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5366 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5367 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5368
5369#ifndef IN_NEM_DARWIN
5370 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5371 AssertRC(rc);
5372 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5373
5374 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5375 AssertRC(rc);
5376 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5377#endif
5378
5379 /*
5380 * PERF_GLOBAL MSR.
5381 */
5382 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5383 {
5384 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5385 AssertRC(rc);
5386 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5387 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5388 }
5389
5390 /*
5391 * PAT MSR.
5392 */
5393 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5394 {
5395 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5396 AssertRC(rc);
5397            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
5398 for (unsigned i = 0; i < 8; i++)
5399 {
5400 uint8_t u8Val = (u64Val & 0xff);
5401 if ( u8Val != 0 /* UC */
5402 && u8Val != 1 /* WC */
5403 && u8Val != 4 /* WT */
5404 && u8Val != 5 /* WP */
5405 && u8Val != 6 /* WB */
5406 && u8Val != 7 /* UC- */)
5407 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5408 u64Val >>= 8;
5409 }
5410 }
5411
5412 /*
5413 * EFER MSR.
5414 */
5415 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5416 {
5417 Assert(g_fHmVmxSupportsVmcsEfer);
5418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5419 AssertRC(rc);
5420 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5421 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5422 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5423 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5424 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5425 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5426 * iemVmxVmentryCheckGuestState(). */
5427 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5428 || !(u64GuestCr0 & X86_CR0_PG)
5429 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5430 VMX_IGS_EFER_LMA_LME_MISMATCH);
5431 }
5432
5433 /*
5434 * Segment registers.
5435 */
5436 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5437 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5438 if (!(u32Eflags & X86_EFL_VM))
5439 {
5440 /* CS */
5441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5442 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5443 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5444 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5445 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5446 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5447 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5448 /* CS cannot be loaded with NULL in protected mode. */
5449 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5450 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5451 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5452 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5453 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5454 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5455 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5456 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5457 else
5458 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5459
5460 /* SS */
5461 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5462 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5463 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5464 if ( !(pCtx->cr0 & X86_CR0_PE)
5465 || pCtx->cs.Attr.n.u4Type == 3)
5466 {
5467 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5468 }
5469
5470 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5471 {
5472 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5473 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5474 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5475 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5476 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5477 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5478 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5479 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5480 }
5481
5482 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5483 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5484 {
5485 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5486 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5487 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5488 || pCtx->ds.Attr.n.u4Type > 11
5489 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5490 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5491 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5492 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5493 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5494 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5495 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5496 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5497 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5498 }
5499 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5500 {
5501 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5502 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5503 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5504 || pCtx->es.Attr.n.u4Type > 11
5505 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5506 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5507 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5508 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5509 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5510 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5511 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5512 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5513 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5514 }
5515 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5516 {
5517 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5518 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5519 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5520 || pCtx->fs.Attr.n.u4Type > 11
5521 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5522 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5523 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5524 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5525 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5526 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5527 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5528 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5529 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5530 }
5531 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5532 {
5533 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5534 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5535 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5536 || pCtx->gs.Attr.n.u4Type > 11
5537 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5538 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5539 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5540 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5541 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5542 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5543 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5544 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5545 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5546 }
5547 /* 64-bit capable CPUs. */
5548 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5549 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5550 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5551 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5552 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5553 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5554 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5555 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5556 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5557 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5558 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5559 }
5560 else
5561 {
5562 /* V86 mode checks. */
5563 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5564 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5565 {
5566 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5567 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5568 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5569 }
5570 else
5571 {
5572 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5573 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5574 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5575 }
5576
5577 /* CS */
5578 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5579 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5580 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5581 /* SS */
5582 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5583 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5584 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5585 /* DS */
5586 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5587 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5588 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5589 /* ES */
5590 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5591 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5592 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5593 /* FS */
5594 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5595 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5596 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5597 /* GS */
5598 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5599 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5600 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5601 /* 64-bit capable CPUs. */
5602 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5603 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5604 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5605 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5606 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5607 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5608 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5609 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5610 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5611 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5612 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5613 }
5614
5615 /*
5616 * TR.
5617 */
5618 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5619 /* 64-bit capable CPUs. */
5620 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5621 if (fLongModeGuest)
5622 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5623 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5624 else
5625 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5626 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5627 VMX_IGS_TR_ATTR_TYPE_INVALID);
5628 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5629 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5630 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5631 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5632 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5633 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5634 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5635 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5636
5637 /*
5638 * GDTR and IDTR (64-bit capable checks).
5639 */
5640 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5641 AssertRC(rc);
5642 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5643
5644 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5645 AssertRC(rc);
5646 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5647
5648 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5649 AssertRC(rc);
5650 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5651
5652 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5653 AssertRC(rc);
5654 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5655
5656 /*
5657 * Guest Non-Register State.
5658 */
5659 /* Activity State. */
5660 uint32_t u32ActivityState;
5661 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5662 AssertRC(rc);
5663 HMVMX_CHECK_BREAK( !u32ActivityState
5664 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5665 VMX_IGS_ACTIVITY_STATE_INVALID);
5666 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5667 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5668
5669 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5670 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5671 {
5672 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5673 }
5674
5675 /** @todo Activity state and injecting interrupts. Left as a todo since we
5676         *        currently don't use any activity state other than ACTIVE. */
5677
5678 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5679 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5680
5681 /* Guest interruptibility-state. */
5682 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5683 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5684 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5685 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5686 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5687 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5688 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5689 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5690 {
5691 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5692 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5693 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5694 }
5695 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5696 {
5697 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5698 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5699 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5700 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5701 }
5702 /** @todo Assumes the processor is not in SMM. */
5703 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5704 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5705 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5706 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5707 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5708 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5709 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5710 {
5711 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5712 }
5713
5714 /* Pending debug exceptions. */
5715 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5716 AssertRC(rc);
5717 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5718 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5719 u32Val = u64Val; /* For pending debug exceptions checks below. */
5720
5721 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5722 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5723 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5724 {
5725 if ( (u32Eflags & X86_EFL_TF)
5726 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5727 {
5728 /* Bit 14 is PendingDebug.BS. */
5729 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5730 }
5731 if ( !(u32Eflags & X86_EFL_TF)
5732 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5733 {
5734 /* Bit 14 is PendingDebug.BS. */
5735 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5736 }
5737 }
5738
5739#ifndef IN_NEM_DARWIN
5740 /* VMCS link pointer. */
5741 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5742 AssertRC(rc);
5743 if (u64Val != UINT64_C(0xffffffffffffffff))
5744 {
5745 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5746 /** @todo Bits beyond the processor's physical-address width MBZ. */
5747 /** @todo SMM checks. */
5748 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5749 Assert(pVmcsInfo->pvShadowVmcs);
5750 VMXVMCSREVID VmcsRevId;
5751 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5752 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5753 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5754 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5755 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5756 }
5757
5758 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5759 * not using nested paging? */
5760 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5761 && !fLongModeGuest
5762 && CPUMIsGuestInPAEModeEx(pCtx))
5763 {
5764 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5765 AssertRC(rc);
5766 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5767
5768 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5769 AssertRC(rc);
5770 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5771
5772 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5773 AssertRC(rc);
5774 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5775
5776 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5777 AssertRC(rc);
5778 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5779 }
5780#endif
5781
5782 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5783 if (uError == VMX_IGS_ERROR)
5784 uError = VMX_IGS_REASON_NOT_FOUND;
5785 } while (0);
5786
5787 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5788 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5789 return uError;
5790
5791#undef HMVMX_ERROR_BREAK
5792#undef HMVMX_CHECK_BREAK
5793}
5794
5795
5796#ifndef HMVMX_USE_FUNCTION_TABLE
5797/**
5798 * Handles a guest VM-exit from hardware-assisted VMX execution.
5799 *
5800 * @returns Strict VBox status code (i.e. informational status codes too).
5801 * @param pVCpu The cross context virtual CPU structure.
5802 * @param pVmxTransient The VMX-transient structure.
5803 */
5804DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5805{
5806#ifdef DEBUG_ramshankar
5807# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5808 do { \
5809 if (a_fSave != 0) \
5810 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5811 VBOXSTRICTRC rcStrict = a_CallExpr; \
5812 if (a_fSave != 0) \
5813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5814 return rcStrict; \
5815 } while (0)
5816#else
5817# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5818#endif
5819 uint32_t const uExitReason = pVmxTransient->uExitReason;
5820 switch (uExitReason)
5821 {
5822 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5823 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5824 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5825 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5826 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5827 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5828 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5829 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5830 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5831 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5832 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5833 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5834 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5835 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5836 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5837 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5838 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5839 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5840 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5841 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5842 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5843 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5844 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5845 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5846 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5847 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5848 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5849 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5850 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5851 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5852#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5853 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5854 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5855 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5856 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5857 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5858         case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5859         case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5860 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5861 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5862 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5863#else
5864 case VMX_EXIT_VMCLEAR:
5865 case VMX_EXIT_VMLAUNCH:
5866 case VMX_EXIT_VMPTRLD:
5867 case VMX_EXIT_VMPTRST:
5868 case VMX_EXIT_VMREAD:
5869 case VMX_EXIT_VMRESUME:
5870 case VMX_EXIT_VMWRITE:
5871 case VMX_EXIT_VMXOFF:
5872 case VMX_EXIT_VMXON:
5873 case VMX_EXIT_INVVPID:
5874 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5875#endif
5876#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5877 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5878#else
5879 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5880#endif
5881
5882 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5883 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5884 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5885
5886 case VMX_EXIT_INIT_SIGNAL:
5887 case VMX_EXIT_SIPI:
5888 case VMX_EXIT_IO_SMI:
5889 case VMX_EXIT_SMI:
5890 case VMX_EXIT_ERR_MSR_LOAD:
5891 case VMX_EXIT_ERR_MACHINE_CHECK:
5892 case VMX_EXIT_PML_FULL:
5893 case VMX_EXIT_VIRTUALIZED_EOI:
5894 case VMX_EXIT_GDTR_IDTR_ACCESS:
5895 case VMX_EXIT_LDTR_TR_ACCESS:
5896 case VMX_EXIT_APIC_WRITE:
5897 case VMX_EXIT_RDRAND:
5898 case VMX_EXIT_RSM:
5899 case VMX_EXIT_VMFUNC:
5900 case VMX_EXIT_ENCLS:
5901 case VMX_EXIT_RDSEED:
5902 case VMX_EXIT_XSAVES:
5903 case VMX_EXIT_XRSTORS:
5904 case VMX_EXIT_UMWAIT:
5905 case VMX_EXIT_TPAUSE:
5906 case VMX_EXIT_LOADIWKEY:
5907 default:
5908 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5909 }
5910#undef VMEXIT_CALL_RET
5911}
5912#endif /* !HMVMX_USE_FUNCTION_TABLE */
5913
5914
5915#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5916/**
5917 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5918 *
5919 * @returns Strict VBox status code (i.e. informational status codes too).
5920 * @param pVCpu The cross context virtual CPU structure.
5921 * @param pVmxTransient The VMX-transient structure.
5922 */
5923DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5924{
5925#ifdef DEBUG_ramshankar
5926# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5927 do { \
5928 if (a_fSave != 0) \
5929 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5930 VBOXSTRICTRC rcStrict = a_CallExpr; \
5931 return rcStrict; \
5932 } while (0)
5933#else
5934# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5935#endif
5936
5937 uint32_t const uExitReason = pVmxTransient->uExitReason;
5938 switch (uExitReason)
5939 {
5940# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5941 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
5942 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
5943# else
5944 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5945 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5946# endif
5947 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
5948 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
5949 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
5950
5951 /*
5952 * We shouldn't direct host physical interrupts to the nested-guest.
5953 */
5954 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5955
5956 /*
5957         * Instructions that cause VM-exits unconditionally or whose exit condition is
5958         * determined solely by the nested hypervisor (meaning that if the VM-exit
5959         * happens, it's guaranteed to be a nested-guest VM-exit).
5960 *
5961 * - Provides VM-exit instruction length ONLY.
5962 */
5963 case VMX_EXIT_CPUID: /* Unconditional. */
5964 case VMX_EXIT_VMCALL:
5965 case VMX_EXIT_GETSEC:
5966 case VMX_EXIT_INVD:
5967 case VMX_EXIT_XSETBV:
5968 case VMX_EXIT_VMLAUNCH:
5969 case VMX_EXIT_VMRESUME:
5970 case VMX_EXIT_VMXOFF:
5971 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5972 case VMX_EXIT_VMFUNC:
5973 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
5974
5975 /*
5976         * Instructions that cause VM-exits unconditionally or whose exit condition is
5977         * determined solely by the nested hypervisor (meaning that if the VM-exit
5978         * happens, it's guaranteed to be a nested-guest VM-exit).
5979 *
5980 * - Provides VM-exit instruction length.
5981 * - Provides VM-exit information.
5982 * - Optionally provides Exit qualification.
5983 *
5984 * Since Exit qualification is 0 for all VM-exits where it is not
5985 * applicable, reading and passing it to the guest should produce
5986 * defined behavior.
5987 *
5988 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5989 */
5990 case VMX_EXIT_INVEPT: /* Unconditional. */
5991 case VMX_EXIT_INVVPID:
5992 case VMX_EXIT_VMCLEAR:
5993 case VMX_EXIT_VMPTRLD:
5994 case VMX_EXIT_VMPTRST:
5995 case VMX_EXIT_VMXON:
5996 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5997 case VMX_EXIT_LDTR_TR_ACCESS:
5998 case VMX_EXIT_RDRAND:
5999 case VMX_EXIT_RDSEED:
6000 case VMX_EXIT_XSAVES:
6001 case VMX_EXIT_XRSTORS:
6002 case VMX_EXIT_UMWAIT:
6003 case VMX_EXIT_TPAUSE:
6004 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6005
6006 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6007 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6008 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6009 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6010 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6011 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6012 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6013 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6014 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6015 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6016 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6017 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6018 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6019 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6020 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6021 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6022 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6023 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6024 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6025
6026 case VMX_EXIT_PREEMPT_TIMER:
6027 {
6028 /** @todo NSTVMX: Preempt timer. */
6029 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6030 }
6031
6032 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6033 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6034
6035 case VMX_EXIT_VMREAD:
6036 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6037
6038 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6039 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6040
6041 case VMX_EXIT_INIT_SIGNAL:
6042 case VMX_EXIT_SIPI:
6043 case VMX_EXIT_IO_SMI:
6044 case VMX_EXIT_SMI:
6045 case VMX_EXIT_ERR_MSR_LOAD:
6046 case VMX_EXIT_ERR_MACHINE_CHECK:
6047 case VMX_EXIT_PML_FULL:
6048 case VMX_EXIT_RSM:
6049 default:
6050 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6051 }
6052#undef VMEXIT_CALL_RET
6053}
6054#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6055
6056
6057/** @name VM-exit helpers.
6058 * @{
6059 */
6060/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6061/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6062/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6063
6064/** Macro for VM-exits called unexpectedly. */
6065#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6066 do { \
6067 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6068 return VERR_VMX_UNEXPECTED_EXIT; \
6069 } while (0)
6070
6071#ifdef VBOX_STRICT
6072# ifndef IN_NEM_DARWIN
6073/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6074# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6075 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6076
6077# define HMVMX_ASSERT_PREEMPT_CPUID() \
6078 do { \
6079 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6080 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6081 } while (0)
6082
6083# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6084 do { \
6085 AssertPtr((a_pVCpu)); \
6086 AssertPtr((a_pVmxTransient)); \
6087 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6088 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6089 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6090 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6091 Assert((a_pVmxTransient)->pVmcsInfo); \
6092 Assert(ASMIntAreEnabled()); \
6093 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6094 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6095 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6096 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6097 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6098 HMVMX_ASSERT_PREEMPT_CPUID(); \
6099 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6100 } while (0)
6101# else
6102# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6103# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6104# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6105 do { \
6106 AssertPtr((a_pVCpu)); \
6107 AssertPtr((a_pVmxTransient)); \
6108 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6109 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6110 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6111 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6112 Assert((a_pVmxTransient)->pVmcsInfo); \
6113 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6114 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6115 } while (0)
6116# endif
6117
6118# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6119 do { \
6120 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6121 Assert((a_pVmxTransient)->fIsNestedGuest); \
6122 } while (0)
6123
6124# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6125 do { \
6126 Log4Func(("\n")); \
6127 } while (0)
6128#else
6129# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6130 do { \
6131 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6132 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6133 } while (0)
6134
6135# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6136 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6137
6138# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6139#endif
6140
6141#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6142/** Macro that does the necessary privilege checks and intercepted VM-exits for
6143 * guests that attempted to execute a VMX instruction. */
6144# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6145 do \
6146 { \
6147 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6148 if (rcStrictTmp == VINF_SUCCESS) \
6149 { /* likely */ } \
6150 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6151 { \
6152 Assert((a_pVCpu)->hm.s.Event.fPending); \
6153 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6154 return VINF_SUCCESS; \
6155 } \
6156 else \
6157 { \
6158 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6159 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6160 } \
6161 } while (0)
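/* Illustrative usage (a sketch only; the real VMX-instruction exit handlers elsewhere in this
   file invoke the macro in essentially this way before decoding operands and calling IEM):
       HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
   If the checks raised an exception, the macro has already queued it as a pending event and
   returned VINF_SUCCESS to the caller. */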
6162
6163/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6164# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6165 do \
6166 { \
6167 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6168 (a_pGCPtrEffAddr)); \
6169 if (rcStrictTmp == VINF_SUCCESS) \
6170 { /* likely */ } \
6171 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6172 { \
6173 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6174 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6175 NOREF(uXcptTmp); \
6176 return VINF_SUCCESS; \
6177 } \
6178 else \
6179 { \
6180 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6181 return rcStrictTmp; \
6182 } \
6183 } while (0)
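/* Illustrative usage (a sketch only; uExitInstrInfo and uExitQual stand for the VM-exit
   instruction-information and Exit qualification values the caller has already read into the
   VMX-transient structure):
       RTGCPTR GCPtrOperand;
       HMVMX_DECODE_MEM_OPERAND(pVCpu, uExitInstrInfo, uExitQual, VMXMEMACCESS_READ, &GCPtrOperand);
   On success, GCPtrOperand holds the effective guest-linear address of the memory operand. */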
6184#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6185
6186
6187/**
6188 * Advances the guest RIP by the specified number of bytes.
6189 *
6190 * @param pVCpu The cross context virtual CPU structure.
6191 * @param cbInstr Number of bytes to advance the RIP by.
6192 *
6193 * @remarks No-long-jump zone!!!
6194 */
6195DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6196{
6197 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6198
6199 /*
6200 * Advance RIP.
6201 *
6202 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6203 * when the addition causes a "carry" into the upper half and check whether
6204     * we're in 64-bit and can go on with it or whether we should zap the top
6205 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6206 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6207 *
6208 * See PC wrap around tests in bs3-cpu-weird-1.
6209 */
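    /* Example (purely illustrative): with RIP=0x00000000ffffffff and cbInstr=2 the sum is
       0x100000001; XOR-ing the old and new RIP sets bit 32, flagging the carry. In 64-bit code
       the full 64-bit result is kept, otherwise the top half is zapped, yielding EIP=0x00000001. */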
6210 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6211 uint64_t const uRipNext = uRipPrev + cbInstr;
6212 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6213 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6214 pVCpu->cpum.GstCtx.rip = uRipNext;
6215 else
6216 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6217
6218 /*
6219 * Clear RF and interrupt shadowing.
6220 */
6221 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6222 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6223 else
6224 {
6225 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6226 {
6227 /** @todo \#DB - single step. */
6228 }
6229 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6230 }
6231 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6232
6233 /* Mark both RIP and RFLAGS as updated. */
6234 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6235}
6236
6237
6238/**
6239 * Advances the guest RIP after reading it from the VMCS.
6240 *
6241 * @returns VBox status code, no informational status codes.
6242 * @param pVCpu The cross context virtual CPU structure.
6243 * @param pVmxTransient The VMX-transient structure.
6244 *
6245 * @remarks No-long-jump zone!!!
6246 */
6247static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6248{
6249 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6250 /** @todo consider template here after checking callers. */
6251 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6252 AssertRCReturn(rc, rc);
6253
6254 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6255 return VINF_SUCCESS;
6256}
6257
6258
6259/**
6260 * Handles a condition that occurred while delivering an event through the guest or
6261 * nested-guest IDT.
6262 *
6263 * @returns Strict VBox status code (i.e. informational status codes too).
6264 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6265 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6266 *          to continue execution of the guest, which will deliver the \#DF.
6267 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6268 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6269 *
6270 * @param pVCpu The cross context virtual CPU structure.
6271 * @param pVmxTransient The VMX-transient structure.
6272 *
6273 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6274 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6275 * is due to an EPT violation, PML full or SPP-related event.
6276 *
6277 * @remarks No-long-jump zone!!!
6278 */
6279static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6280{
6281 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6282 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6283 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6284 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6285 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6286 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6287
6288 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6289 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6290 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6291 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6292 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6293 {
6294 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6295 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6296
6297 /*
6298 * If the event was a software interrupt (generated with INT n) or a software exception
6299 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6300 * can handle the VM-exit and continue guest execution which will re-execute the
6301 * instruction rather than re-injecting the exception, as that can cause premature
6302 * trips to ring-3 before injection and involve TRPM which currently has no way of
6303 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6304 * the problem).
6305 */
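        /* Example (illustrative only): if a contributory exception such as #NP was being delivered
           and its delivery raised #GP, the evaluation below typically yields a double fault; if the
           original event was a software INT n instead, we simply let the instruction be re-executed. */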
6306 IEMXCPTRAISE enmRaise;
6307 IEMXCPTRAISEINFO fRaiseInfo;
6308 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6309 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6310 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6311 {
6312 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6313 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6314 }
6315 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6316 {
6317 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6318 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6319 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6320
6321 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6322 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6323
6324 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6325
6326 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6327 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6328 {
6329 pVmxTransient->fVectoringPF = true;
6330 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6331 }
6332 }
6333 else
6334 {
6335 /*
6336 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6337 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6338 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6339 */
6340 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6341 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6342 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6343 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6344 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6345 }
6346
6347 /*
6348 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6349 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6350 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6351 * subsequent VM-entry would fail, see @bugref{7445}.
6352 *
6353 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6354 */
6355 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6356 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6357 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6358 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6359 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6360
6361 switch (enmRaise)
6362 {
6363 case IEMXCPTRAISE_CURRENT_XCPT:
6364 {
6365 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6366 Assert(rcStrict == VINF_SUCCESS);
6367 break;
6368 }
6369
6370 case IEMXCPTRAISE_PREV_EVENT:
6371 {
6372 uint32_t u32ErrCode;
6373 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6374 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6375 else
6376 u32ErrCode = 0;
6377
6378 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6380 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6381 pVCpu->cpum.GstCtx.cr2);
6382
6383 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6384 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6385 Assert(rcStrict == VINF_SUCCESS);
6386 break;
6387 }
6388
6389 case IEMXCPTRAISE_REEXEC_INSTR:
6390 Assert(rcStrict == VINF_SUCCESS);
6391 break;
6392
6393 case IEMXCPTRAISE_DOUBLE_FAULT:
6394 {
6395 /*
6396 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6397 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6398 */
6399 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6400 {
6401 pVmxTransient->fVectoringDoublePF = true;
6402 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6403 pVCpu->cpum.GstCtx.cr2));
6404 rcStrict = VINF_SUCCESS;
6405 }
6406 else
6407 {
6408 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6409 vmxHCSetPendingXcptDF(pVCpu);
6410 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6411 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6412 rcStrict = VINF_HM_DOUBLE_FAULT;
6413 }
6414 break;
6415 }
6416
6417 case IEMXCPTRAISE_TRIPLE_FAULT:
6418 {
6419 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6420 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6421 rcStrict = VINF_EM_RESET;
6422 break;
6423 }
6424
6425 case IEMXCPTRAISE_CPU_HANG:
6426 {
6427 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6428 rcStrict = VERR_EM_GUEST_CPU_HANG;
6429 break;
6430 }
6431
6432 default:
6433 {
6434 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6435 rcStrict = VERR_VMX_IPE_2;
6436 break;
6437 }
6438 }
6439 }
6440 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6441 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6442 {
6443 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6444 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6445 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6446 {
6447 /*
6448              * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6449 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6450 * that virtual NMIs remain blocked until the IRET execution is completed.
6451 *
6452 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6453 */
6454 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6455 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6456 }
6457 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6458 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6459 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6460 {
6461 /*
6462 * Execution of IRET caused an EPT violation, page-modification log-full event or
6463 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6464 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6465 * that virtual NMIs remain blocked until the IRET execution is completed.
6466 *
6467 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6468 */
6469 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6470 {
6471 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6472 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6473 }
6474 }
6475 }
6476
6477 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6478 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6479 return rcStrict;
6480}
6481
6482
6483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6484/**
6485 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
6486 * guest attempting to execute a VMX instruction.
6487 *
6488 * @returns Strict VBox status code (i.e. informational status codes too).
6489 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6490 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6491 *
6492 * @param pVCpu The cross context virtual CPU structure.
6493 * @param uExitReason The VM-exit reason.
6494 *
6495 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6496 * @remarks No-long-jump zone!!!
6497 */
6498static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6499{
6500 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6501 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6502
6503 /*
6504 * The physical CPU would have already checked the CPU mode/code segment.
6505 * We shall just assert here for paranoia.
6506 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6507 */
6508 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6509 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6510 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6511
6512 if (uExitReason == VMX_EXIT_VMXON)
6513 {
6514 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6515
6516 /*
6517 * We check CR4.VMXE because it is required to be always set while in VMX operation
6518 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6519 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6520 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6521 */
6522 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6523 {
6524 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6525 vmxHCSetPendingXcptUD(pVCpu);
6526 return VINF_HM_PENDING_XCPT;
6527 }
6528 }
6529 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6530 {
6531 /*
6532          * The guest has not entered VMX operation but attempted to execute a VMX instruction
6533          * (other than VMXON), so we need to raise a #UD.
6534 */
6535 Log4Func(("Not in VMX root mode -> #UD\n"));
6536 vmxHCSetPendingXcptUD(pVCpu);
6537 return VINF_HM_PENDING_XCPT;
6538 }
6539
6540 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6541 return VINF_SUCCESS;
6542}
6543
6544
6545/**
6546 * Decodes the memory operand of an instruction that caused a VM-exit.
6547 *
6548 * The Exit qualification field provides the displacement field for memory
6549 * operand instructions, if any.
6550 *
6551 * @returns Strict VBox status code (i.e. informational status codes too).
6552 * @retval VINF_SUCCESS if the operand was successfully decoded.
6553 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6554 * operand.
6555 * @param pVCpu The cross context virtual CPU structure.
6556 * @param uExitInstrInfo The VM-exit instruction information field.
6557 * @param enmMemAccess The memory operand's access type (read or write).
6558 * @param GCPtrDisp The instruction displacement field, if any. For
6559 * RIP-relative addressing pass RIP + displacement here.
6560 * @param pGCPtrMem Where to store the effective destination memory address.
6561 *
6562 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6563 * virtual-8086 mode hence skips those checks while verifying if the
6564 * segment is valid.
6565 */
6566static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6567 PRTGCPTR pGCPtrMem)
6568{
6569 Assert(pGCPtrMem);
6570 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6571 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6572 | CPUMCTX_EXTRN_CR0);
6573
6574 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6575 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6576 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6577
6578 VMXEXITINSTRINFO ExitInstrInfo;
6579 ExitInstrInfo.u = uExitInstrInfo;
6580 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6581 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6582 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6583 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6584 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6585 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6586 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6587 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6588 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6589
6590 /*
6591 * Validate instruction information.
6592     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6593 */
6594 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6595 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6596 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6597 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6598 AssertLogRelMsgReturn(fIsMemOperand,
6599 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6600
6601 /*
6602 * Compute the complete effective address.
6603 *
6604 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6605 * See AMD spec. 4.5.2 "Segment Registers".
6606 */
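    /* In other words (illustrative summary of the computation below):
           GCPtrMem = Disp + Base (if valid) + (Index << Scale) (if valid)
       with the segment base added when not in long mode or when FS/GS is used, and the result
       truncated to the instruction's address size. */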
6607 RTGCPTR GCPtrMem = GCPtrDisp;
6608 if (fBaseRegValid)
6609 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6610 if (fIdxRegValid)
6611 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6612
6613 RTGCPTR const GCPtrOff = GCPtrMem;
6614 if ( !fIsLongMode
6615 || iSegReg >= X86_SREG_FS)
6616 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6617 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6618
6619 /*
6620 * Validate effective address.
6621 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6622 */
6623 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6624 Assert(cbAccess > 0);
6625 if (fIsLongMode)
6626 {
6627 if (X86_IS_CANONICAL(GCPtrMem))
6628 {
6629 *pGCPtrMem = GCPtrMem;
6630 return VINF_SUCCESS;
6631 }
6632
6633 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6634 * "Data Limit Checks in 64-bit Mode". */
6635 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6636 vmxHCSetPendingXcptGP(pVCpu, 0);
6637 return VINF_HM_PENDING_XCPT;
6638 }
6639
6640 /*
6641 * This is a watered down version of iemMemApplySegment().
6642 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6643 * and segment CPL/DPL checks are skipped.
6644 */
6645 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6646 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6647 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6648
6649 /* Check if the segment is present and usable. */
6650 if ( pSel->Attr.n.u1Present
6651 && !pSel->Attr.n.u1Unusable)
6652 {
6653 Assert(pSel->Attr.n.u1DescType);
6654 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6655 {
6656 /* Check permissions for the data segment. */
6657 if ( enmMemAccess == VMXMEMACCESS_WRITE
6658 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6659 {
6660 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6661 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6662 return VINF_HM_PENDING_XCPT;
6663 }
6664
6665 /* Check limits if it's a normal data segment. */
6666 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6667 {
6668 if ( GCPtrFirst32 > pSel->u32Limit
6669 || GCPtrLast32 > pSel->u32Limit)
6670 {
6671 Log4Func(("Data segment limit exceeded. "
6672 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6673 GCPtrLast32, pSel->u32Limit));
6674 if (iSegReg == X86_SREG_SS)
6675 vmxHCSetPendingXcptSS(pVCpu, 0);
6676 else
6677 vmxHCSetPendingXcptGP(pVCpu, 0);
6678 return VINF_HM_PENDING_XCPT;
6679 }
6680 }
6681 else
6682 {
6683 /* Check limits if it's an expand-down data segment.
6684 Note! The upper boundary is defined by the B bit, not the G bit! */
6685 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6686 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6687 {
6688 Log4Func(("Expand-down data segment limit exceeded. "
6689 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6690 GCPtrLast32, pSel->u32Limit));
6691 if (iSegReg == X86_SREG_SS)
6692 vmxHCSetPendingXcptSS(pVCpu, 0);
6693 else
6694 vmxHCSetPendingXcptGP(pVCpu, 0);
6695 return VINF_HM_PENDING_XCPT;
6696 }
6697 }
6698 }
6699 else
6700 {
6701 /* Check permissions for the code segment. */
6702 if ( enmMemAccess == VMXMEMACCESS_WRITE
6703 || ( enmMemAccess == VMXMEMACCESS_READ
6704 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6705 {
6706 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6707 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6708 vmxHCSetPendingXcptGP(pVCpu, 0);
6709 return VINF_HM_PENDING_XCPT;
6710 }
6711
6712 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6713 if ( GCPtrFirst32 > pSel->u32Limit
6714 || GCPtrLast32 > pSel->u32Limit)
6715 {
6716 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6717 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6718 if (iSegReg == X86_SREG_SS)
6719 vmxHCSetPendingXcptSS(pVCpu, 0);
6720 else
6721 vmxHCSetPendingXcptGP(pVCpu, 0);
6722 return VINF_HM_PENDING_XCPT;
6723 }
6724 }
6725 }
6726 else
6727 {
6728 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6729 vmxHCSetPendingXcptGP(pVCpu, 0);
6730 return VINF_HM_PENDING_XCPT;
6731 }
6732
6733 *pGCPtrMem = GCPtrMem;
6734 return VINF_SUCCESS;
6735}
6736#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6737
6738
6739/**
6740 * VM-exit helper for LMSW.
6741 */
6742static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6743{
6744 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6745 AssertRCReturn(rc, rc);
6746
6747 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6748 AssertMsg( rcStrict == VINF_SUCCESS
6749 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6750
6751 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6752 if (rcStrict == VINF_IEM_RAISED_XCPT)
6753 {
6754 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6755 rcStrict = VINF_SUCCESS;
6756 }
6757
6758 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6759 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6760 return rcStrict;
6761}
6762
6763
6764/**
6765 * VM-exit helper for CLTS.
6766 */
6767static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6768{
6769 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6770 AssertRCReturn(rc, rc);
6771
6772 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6773 AssertMsg( rcStrict == VINF_SUCCESS
6774 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6775
6776 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6777 if (rcStrict == VINF_IEM_RAISED_XCPT)
6778 {
6779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6780 rcStrict = VINF_SUCCESS;
6781 }
6782
6783 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6784 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6785 return rcStrict;
6786}
6787
6788
6789/**
6790 * VM-exit helper for MOV from CRx (CRx read).
6791 */
6792static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6793{
6794 Assert(iCrReg < 16);
6795 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6796
6797 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6798 AssertRCReturn(rc, rc);
6799
6800 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6801 AssertMsg( rcStrict == VINF_SUCCESS
6802 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6803
6804 if (iGReg == X86_GREG_xSP)
6805 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6806 else
6807 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6808#ifdef VBOX_WITH_STATISTICS
6809 switch (iCrReg)
6810 {
6811 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6812 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6813 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6814 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6815 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6816 }
6817#endif
6818 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6819 return rcStrict;
6820}
6821
6822
6823/**
6824 * VM-exit helper for MOV to CRx (CRx write).
6825 */
6826static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6827{
6828 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6829
6830 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6831 AssertMsg( rcStrict == VINF_SUCCESS
6832 || rcStrict == VINF_IEM_RAISED_XCPT
6833 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6834
6835 switch (iCrReg)
6836 {
6837 case 0:
6838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6839 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6840 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6841 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6842 break;
6843
6844 case 2:
6845 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6846             /* Nothing to do here, CR2 is not part of the VMCS. */
6847 break;
6848
6849 case 3:
6850 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6851 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6852 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6853 break;
6854
6855 case 4:
6856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6857 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6858#ifndef IN_NEM_DARWIN
6859 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6860 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6861#else
6862 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6863#endif
6864 break;
6865
6866 case 8:
6867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6868 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6869 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6870 break;
6871
6872 default:
6873 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6874 break;
6875 }
6876
6877 if (rcStrict == VINF_IEM_RAISED_XCPT)
6878 {
6879 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6880 rcStrict = VINF_SUCCESS;
6881 }
6882 return rcStrict;
6883}
6884
6885
6886/**
6887 * VM-exit exception handler for \#PF (Page-fault exception).
6888 *
6889 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6890 */
6891static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6892{
6893 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6894 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6895
6896#ifndef IN_NEM_DARWIN
6897 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6898 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6899 { /* likely */ }
6900 else
6901#endif
6902 {
6903#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6904 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6905#endif
6906 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6907 if (!pVmxTransient->fVectoringDoublePF)
6908 {
6909 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6910 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6911 }
6912 else
6913 {
6914 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6915 Assert(!pVmxTransient->fIsNestedGuest);
6916 vmxHCSetPendingXcptDF(pVCpu);
6917 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6918 }
6919 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6920 return VINF_SUCCESS;
6921 }
6922
6923 Assert(!pVmxTransient->fIsNestedGuest);
6924
6925     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6926        of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6927 if (pVmxTransient->fVectoringPF)
6928 {
6929 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6930 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6931 }
6932
6933 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6934 AssertRCReturn(rc, rc);
6935
6936 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6937 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6938
6939 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6940 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6941
6942 Log4Func(("#PF: rc=%Rrc\n", rc));
6943 if (rc == VINF_SUCCESS)
6944 {
6945 /*
6946         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6947 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6948 */
6949 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6950 TRPMResetTrap(pVCpu);
6951 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6952 return rc;
6953 }
6954
6955 if (rc == VINF_EM_RAW_GUEST_TRAP)
6956 {
6957 if (!pVmxTransient->fVectoringDoublePF)
6958 {
6959 /* It's a guest page fault and needs to be reflected to the guest. */
6960 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6961 TRPMResetTrap(pVCpu);
6962 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6963 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6964 uGstErrorCode, pVmxTransient->uExitQual);
6965 }
6966 else
6967 {
6968 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6969 TRPMResetTrap(pVCpu);
6970 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6971 vmxHCSetPendingXcptDF(pVCpu);
6972 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6973 }
6974
6975 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6976 return VINF_SUCCESS;
6977 }
6978
6979 TRPMResetTrap(pVCpu);
6980 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6981 return rc;
6982}
6983
6984
6985/**
6986 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6987 *
6988 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6989 */
6990static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6991{
6992 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6993 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6994
6995 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6996 AssertRCReturn(rc, rc);
6997
6998 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6999 {
7000 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7001 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7002
7003 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7004          *        provides VM-exit instruction length. If this causes problems later,
7005 * disassemble the instruction like it's done on AMD-V. */
7006 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7007 AssertRCReturn(rc2, rc2);
7008 return rc;
7009 }
7010
7011 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7012 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7013 return VINF_SUCCESS;
7014}
7015
7016
7017/**
7018 * VM-exit exception handler for \#BP (Breakpoint exception).
7019 *
7020 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7021 */
7022static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7023{
7024 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7025 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7026
7027 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7028 AssertRCReturn(rc, rc);
7029
7030 VBOXSTRICTRC rcStrict;
7031 if (!pVmxTransient->fIsNestedGuest)
7032 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7033 else
7034 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7035
7036 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7037 {
7038 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7039 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7040 rcStrict = VINF_SUCCESS;
7041 }
7042
7043 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7044 return rcStrict;
7045}
7046
7047
7048/**
7049 * VM-exit exception handler for \#AC (Alignment-check exception).
7050 *
7051 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7052 */
7053static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7054{
7055 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7056
7057 /*
7058 * Detect #ACs caused by host having enabled split-lock detection.
7059 * Emulate such instructions.
7060 */
7061#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7062 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7063 AssertRCReturn(rc, rc);
7064 /** @todo detect split lock in cpu feature? */
7065 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7066 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7067 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7068 || CPUMGetGuestCPL(pVCpu) != 3
7069           /* 3. When EFLAGS.AC is clear this can only be a split-lock case (a legacy #AC requires it to be set). */
7070 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7071 {
7072 /*
7073 * Check for debug/trace events and import state accordingly.
7074 */
7075 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7076 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7077 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7078#ifndef IN_NEM_DARWIN
7079 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7080#endif
7081 )
7082 {
7083 if (pVM->cCpus == 1)
7084 {
7085#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7086 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7087 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7088#else
7089 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7090 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7091#endif
7092 AssertRCReturn(rc, rc);
7093 }
7094 }
7095 else
7096 {
7097 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7098 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7099 AssertRCReturn(rc, rc);
7100
7101            VBOXVMM_VMX_SPLIT_LOCK(pVCpu, &pVCpu->cpum.GstCtx);
7102
7103 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7104 {
7105 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7106 if (rcStrict != VINF_SUCCESS)
7107 return rcStrict;
7108 }
7109 }
7110
7111 /*
7112 * Emulate the instruction.
7113 *
7114 * We have to ignore the LOCK prefix here as we must not retrigger the
7115 * detection on the host. This isn't all that satisfactory, though...
7116 */
7117 if (pVM->cCpus == 1)
7118 {
7119 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7120 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7121
7122 /** @todo For SMP configs we should do a rendezvous here. */
7123 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7124 if (rcStrict == VINF_SUCCESS)
7125#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7126 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7127 HM_CHANGED_GUEST_RIP
7128 | HM_CHANGED_GUEST_RFLAGS
7129 | HM_CHANGED_GUEST_GPRS_MASK
7130 | HM_CHANGED_GUEST_CS
7131 | HM_CHANGED_GUEST_SS);
7132#else
7133 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7134#endif
7135 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7136 {
7137 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7138 rcStrict = VINF_SUCCESS;
7139 }
7140 return rcStrict;
7141 }
7142 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7143 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7144 return VINF_EM_EMULATE_SPLIT_LOCK;
7145 }
7146
7147 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7148 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7149 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7150
7151 /* Re-inject it. We'll detect any nesting before getting here. */
7152 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7153 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7154 return VINF_SUCCESS;
7155}
7156
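/*
 * Illustrative sketch only (not compiled): the split-lock classification performed inline by
 * vmxHCExitXcptAC above, condensed into a stand-alone predicate.  The helper name is
 * hypothetical; the real handler keeps these checks inline as seen above.
 */
#if 0
DECLINLINE(bool) vmxHCIsSplitLockXcptAc(PCCPUMCTX pCtx, uint8_t uCpl)
{
    /* A legacy (486-style) #AC requires CR0.AM=1, CPL=3 and EFLAGS.AC=1; if any of these
       does not hold, the intercepted #AC can only stem from host split-lock detection. */
    return !(pCtx->cr0 & X86_CR0_AM)
        || uCpl != 3
        || !(pCtx->eflags.u & X86_EFL_AC);
}
#endif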
7157
7158/**
7159 * VM-exit exception handler for \#DB (Debug exception).
7160 *
7161 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7162 */
7163static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7164{
7165 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7166 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7167
7168 /*
7169 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7170 */
7171 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7172
7173 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7174 uint64_t const uDR6 = X86_DR6_INIT_VAL
7175 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7176 | X86_DR6_BD | X86_DR6_BS));
7177 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7178
7179 int rc;
7180 if (!pVmxTransient->fIsNestedGuest)
7181 {
7182 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7183
7184 /*
7185 * Prevents stepping twice over the same instruction when the guest is stepping using
7186 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7187 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7188 */
7189 if ( rc == VINF_EM_DBG_STEPPED
7190 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7191 {
7192 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7193 rc = VINF_EM_RAW_GUEST_TRAP;
7194 }
7195 }
7196 else
7197 rc = VINF_EM_RAW_GUEST_TRAP;
7198 Log6Func(("rc=%Rrc\n", rc));
7199 if (rc == VINF_EM_RAW_GUEST_TRAP)
7200 {
7201 /*
7202 * The exception was for the guest. Update DR6, DR7.GD and
7203 * IA32_DEBUGCTL.LBR before forwarding it.
7204 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7205 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7206 */
7207#ifndef IN_NEM_DARWIN
7208 VMMRZCallRing3Disable(pVCpu);
7209 HM_DISABLE_PREEMPT(pVCpu);
7210
7211 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7212 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7213 if (CPUMIsGuestDebugStateActive(pVCpu))
7214 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7215
7216 HM_RESTORE_PREEMPT();
7217 VMMRZCallRing3Enable(pVCpu);
7218#else
7219 /** @todo */
7220#endif
7221
7222 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7223 AssertRCReturn(rc, rc);
7224
7225 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7226 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7227
7228 /* Paranoia. */
7229 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7230 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7231
7232 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7233 AssertRC(rc);
7234
7235 /*
7236 * Raise #DB in the guest.
7237 *
7238 * It is important to reflect exactly what the VM-exit gave us (preserving the
7239 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7240 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7241 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7242 *
7243         * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as
7244         * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7245 */
7246 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7247 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7248 return VINF_SUCCESS;
7249 }
7250
7251 /*
7252 * Not a guest trap, must be a hypervisor related debug event then.
7253 * Update DR6 in case someone is interested in it.
7254 */
7255 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7256 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7257 CPUMSetHyperDR6(pVCpu, uDR6);
7258
7259 return rc;
7260}
7261
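/*
 * Worked example for the Exit qualification handling in vmxHCExitXcptDB above (illustrative
 * only, not compiled): a guest #DB reported because breakpoint 1 matched while single-stepping
 * would have the B1 and BS bits set in the Exit qualification, which the handler merges into a
 * DR6-like value as follows.
 */
#if 0
    uint64_t const uExitQualExample = X86_DR6_B1 | X86_DR6_BS;              /* What the CPU reports, see Intel spec. Table 27-1. */
    uint64_t const uDR6Example      = X86_DR6_INIT_VAL | uExitQualExample;  /* What gets passed to DBGF and merged into guest DR6. */
#endif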
7262
7263/**
7264 * Hacks its way around the lovely mesa driver's backdoor accesses.
7265 *
7266 * @sa hmR0SvmHandleMesaDrvGp.
7267 */
7268static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7269{
7270 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7271 RT_NOREF(pCtx);
7272
7273 /* For now we'll just skip the instruction. */
7274 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7275}
7276
7277
7278/**
7279 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7280 * backdoor logging w/o checking what it is running inside.
7281 *
7282 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7283 * backdoor port and magic numbers loaded in registers.
7284 *
7285 * @returns true if it is, false if it isn't.
7286 * @sa hmR0SvmIsMesaDrvGp.
7287 */
7288DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7289{
7290 /* 0xed: IN eAX,dx */
7291 uint8_t abInstr[1];
7292 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7293 return false;
7294
7295 /* Check that it is #GP(0). */
7296 if (pVmxTransient->uExitIntErrorCode != 0)
7297 return false;
7298
7299 /* Check magic and port. */
7300 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7301 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7302 if (pCtx->rax != UINT32_C(0x564d5868))
7303 return false;
7304 if (pCtx->dx != UINT32_C(0x5658))
7305 return false;
7306
7307 /* Flat ring-3 CS. */
7308 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7309 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7310 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7311 if (pCtx->cs.Attr.n.u2Dpl != 3)
7312 return false;
7313 if (pCtx->cs.u64Base != 0)
7314 return false;
7315
7316 /* Check opcode. */
7317 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7318 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7319 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7320 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7321 if (RT_FAILURE(rc))
7322 return false;
7323 if (abInstr[0] != 0xed)
7324 return false;
7325
7326 return true;
7327}
7328
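/*
 * For illustration, the guest-side access recognized above corresponds roughly to the following
 * ring-3 sequence (sketch only, reconstructed from the checks in vmxHCIsMesaDrvGp; the actual
 * Mesa driver code is not reproduced here):
 *
 *      mov     eax, 0x564d5868     ; backdoor magic
 *      mov     edx, 0x5658         ; backdoor I/O port
 *      in      eax, dx             ; opcode 0xed, faults with #GP(0) when ring-3 I/O access is denied
 */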
7329
7330/**
7331 * VM-exit exception handler for \#GP (General-protection exception).
7332 *
7333 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7334 */
7335static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7336{
7337 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7338 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7339
7340 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7341 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7342#ifndef IN_NEM_DARWIN
7343 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7344 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7345 { /* likely */ }
7346 else
7347#endif
7348 {
7349#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7350# ifndef IN_NEM_DARWIN
7351 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7352# else
7353 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7354# endif
7355#endif
7356 /*
7357 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7358 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7359 */
7360 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7361 AssertRCReturn(rc, rc);
7362 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7363 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7364
7365 if ( pVmxTransient->fIsNestedGuest
7366 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7367 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7368 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7369 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7370 else
7371 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7372 return rc;
7373 }
7374
7375#ifndef IN_NEM_DARWIN
7376 Assert(CPUMIsGuestInRealModeEx(pCtx));
7377 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7378 Assert(!pVmxTransient->fIsNestedGuest);
7379
7380 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7381 AssertRCReturn(rc, rc);
7382
7383 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7384 if (rcStrict == VINF_SUCCESS)
7385 {
7386 if (!CPUMIsGuestInRealModeEx(pCtx))
7387 {
7388 /*
7389 * The guest is no longer in real-mode, check if we can continue executing the
7390 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7391 */
7392 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7393 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7394 {
7395 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7397 }
7398 else
7399 {
7400 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7401 rcStrict = VINF_EM_RESCHEDULE;
7402 }
7403 }
7404 else
7405 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7406 }
7407 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7408 {
7409 rcStrict = VINF_SUCCESS;
7410 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7411 }
7412 return VBOXSTRICTRC_VAL(rcStrict);
7413#endif
7414}
7415
7416
7417/**
7418 * VM-exit exception handler for \#DE (Divide Error).
7419 *
7420 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7421 */
7422static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7423{
7424 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7425 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7426
7427 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7428 AssertRCReturn(rc, rc);
7429
7430 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7431 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7432 {
7433 uint8_t cbInstr = 0;
7434 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7435 if (rc2 == VINF_SUCCESS)
7436 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7437 else if (rc2 == VERR_NOT_FOUND)
7438 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7439 else
7440 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7441 }
7442 else
7443 rcStrict = VINF_SUCCESS; /* Do nothing. */
7444
7445 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7446 if (RT_FAILURE(rcStrict))
7447 {
7448 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7449 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7450 rcStrict = VINF_SUCCESS;
7451 }
7452
7453 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7454 return VBOXSTRICTRC_VAL(rcStrict);
7455}
7456
7457
7458/**
7459 * VM-exit exception handler wrapper for all other exceptions that are not handled
7460 * by a specific handler.
7461 *
7462 * This simply re-injects the exception back into the VM without any special
7463 * processing.
7464 *
7465 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7466 */
7467static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7468{
7469 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7470
7471#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7472# ifndef IN_NEM_DARWIN
7473 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7474 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7475 ("uVector=%#x u32XcptBitmap=%#X32\n",
7476 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7477 NOREF(pVmcsInfo);
7478# endif
7479#endif
7480
7481 /*
7482 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7483 * would have been handled while checking exits due to event delivery.
7484 */
7485 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7486
7487#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7488 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7489 AssertRCReturn(rc, rc);
7490 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7491#endif
7492
7493#ifdef VBOX_WITH_STATISTICS
7494 switch (uVector)
7495 {
7496 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7497 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7498 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7499 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7500 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7501 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7502 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7503 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7504 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7505 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7506 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7507 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7508 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7509 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7510 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7511 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7512 default:
7513 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7514 break;
7515 }
7516#endif
7517
7518 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7519 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7520 NOREF(uVector);
7521
7522 /* Re-inject the original exception into the guest. */
7523 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7524 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7525 return VINF_SUCCESS;
7526}
7527
7528
7529/**
7530 * VM-exit exception handler for all exceptions (except NMIs!).
7531 *
7532 * @remarks This may be called for both guests and nested-guests. Take care to not
7533 * make assumptions and avoid doing anything that is not relevant when
7534 * executing a nested-guest (e.g., Mesa driver hacks).
7535 */
7536static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7537{
7538 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7539
7540 /*
7541 * If this VM-exit occurred while delivering an event through the guest IDT, take
7542 * action based on the return code and additional hints (e.g. for page-faults)
7543 * that will be updated in the VMX transient structure.
7544 */
7545 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7546 if (rcStrict == VINF_SUCCESS)
7547 {
7548 /*
7549 * If an exception caused a VM-exit due to delivery of an event, the original
7550 * event may have to be re-injected into the guest. We shall reinject it and
7551 * continue guest execution. However, page-fault is a complicated case and
7552 * needs additional processing done in vmxHCExitXcptPF().
7553 */
7554 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7555 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7556 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7557 || uVector == X86_XCPT_PF)
7558 {
7559 switch (uVector)
7560 {
7561 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7562 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7563 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7564 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7565 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7566 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7567 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7568 default:
7569 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7570 }
7571 }
7572 /* else: inject pending event before resuming guest execution. */
7573 }
7574 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7575 {
7576 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7577 rcStrict = VINF_SUCCESS;
7578 }
7579
7580 return rcStrict;
7581}
7582/** @} */
7583
7584
7585/** @name VM-exit handlers.
7586 * @{
7587 */
7588/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7589/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7590/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7591
7592/**
7593 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7594 */
7595HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7596{
7597 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7598 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7599
7600#ifndef IN_NEM_DARWIN
7601 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7602 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7603 return VINF_SUCCESS;
7604 return VINF_EM_RAW_INTERRUPT;
7605#else
7606 return VINF_SUCCESS;
7607#endif
7608}
7609
7610
7611/**
7612 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7613 * VM-exit.
7614 */
7615HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7616{
7617 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7618 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7619
7620 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7621
7622 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7623 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7624 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7625
7626 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7627 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7628 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7629 NOREF(pVmcsInfo);
7630
7631 VBOXSTRICTRC rcStrict;
7632 switch (uExitIntType)
7633 {
7634#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7635 /*
7636 * Host physical NMIs:
7637 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7638 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7639 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7640 *
7641 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7642 * See Intel spec. 27.5.5 "Updating Non-Register State".
7643 */
7644 case VMX_EXIT_INT_INFO_TYPE_NMI:
7645 {
7646 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7647 break;
7648 }
7649#endif
7650
7651 /*
7652 * Privileged software exceptions (#DB from ICEBP),
7653 * Software exceptions (#BP and #OF),
7654 * Hardware exceptions:
7655 * Process the required exceptions and resume guest execution if possible.
7656 */
7657 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7658 Assert(uVector == X86_XCPT_DB);
7659 RT_FALL_THRU();
7660 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7661 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7662 RT_FALL_THRU();
7663 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7664 {
7665 NOREF(uVector);
7666 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7667 | HMVMX_READ_EXIT_INSTR_LEN
7668 | HMVMX_READ_IDT_VECTORING_INFO
7669 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7670 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7671 break;
7672 }
7673
7674 default:
7675 {
7676 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7677 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7678 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7679 break;
7680 }
7681 }
7682
7683 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7684 return rcStrict;
7685}
7686
7687
7688/**
7689 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7690 */
7691HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7692{
7693 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7694
7695    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7696 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7697 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7698
7699 /* Evaluate and deliver pending events and resume guest execution. */
7700 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7701 return VINF_SUCCESS;
7702}
7703
7704
7705/**
7706 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7707 */
7708HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7709{
7710 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7711
7712 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7713 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7714 {
7715 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7716 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7717 }
7718
7719 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7720
7721 /*
7722 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7723 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7724 */
7725 uint32_t fIntrState;
7726 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7727 AssertRC(rc);
7728 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7729 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7730 {
7731 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7732
7733 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7734 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7735 AssertRC(rc);
7736 }
7737
7738    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7739 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7740
7741 /* Evaluate and deliver pending events and resume guest execution. */
7742 return VINF_SUCCESS;
7743}
7744
7745
7746/**
7747 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7748 */
7749HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7750{
7751 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7752 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7753}
7754
7755
7756/**
7757 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7758 */
7759HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7760{
7761 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7762 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7763}
7764
7765
7766/**
7767 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7768 */
7769HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7770{
7771 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7772
7773 /*
7774 * Get the state we need and update the exit history entry.
7775 */
7776 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7777 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7778 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7779 AssertRCReturn(rc, rc);
7780
7781 VBOXSTRICTRC rcStrict;
7782 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7783 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7784 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7785 if (!pExitRec)
7786 {
7787 /*
7788 * Regular CPUID instruction execution.
7789 */
7790 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7791 if (rcStrict == VINF_SUCCESS)
7792 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7793 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7794 {
7795 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7796 rcStrict = VINF_SUCCESS;
7797 }
7798 }
7799 else
7800 {
7801 /*
7802 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7803 */
7804 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7805 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7806 AssertRCReturn(rc2, rc2);
7807
7808 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7809 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7810
7811 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7812 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7813
7814 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7815 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7816 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7817 }
7818 return rcStrict;
7819}
7820
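/*
 * The exit-history pattern used by vmxHCExitCpuid above (and by several other exit handlers),
 * in stripped-down form.  Illustrative sketch only (not compiled): pfnEmulateOne, fFlagsAndType
 * and uFlatPc are hypothetical stand-ins for the per-exit IEM call (IEMExecDecodedCpuid here),
 * the EMEXIT_MAKE_FT() value and the flat PC respectively.
 */
#if 0
    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu, fFlagsAndType, uFlatPc);
    if (!pExitRec)
        rcStrict = pfnEmulateOne(pVCpu);                /* Common case: emulate just this one instruction. */
    else
        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);   /* Hot exit PC: import the full state and let EM probe/execute a burst. */
#endif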
7821
7822/**
7823 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7824 */
7825HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7826{
7827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7828
7829 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7830 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7831 AssertRCReturn(rc, rc);
7832
7833 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7834 return VINF_EM_RAW_EMULATE_INSTR;
7835
7836 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7837 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7838}
7839
7840
7841/**
7842 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7843 */
7844HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7845{
7846 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7847
7848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7849 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7850 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7851 AssertRCReturn(rc, rc);
7852
7853 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7854 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7855 {
7856 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7857 we must reset offsetting on VM-entry. See @bugref{6634}. */
7858 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7859 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7860 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7861 }
7862 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7863 {
7864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7865 rcStrict = VINF_SUCCESS;
7866 }
7867 return rcStrict;
7868}
7869
7870
7871/**
7872 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7873 */
7874HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7875{
7876 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7877
7878 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7879 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7880 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7881 AssertRCReturn(rc, rc);
7882
7883 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7884 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7885 {
7886 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7887 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7888 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7889 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7891 }
7892 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7893 {
7894 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7895 rcStrict = VINF_SUCCESS;
7896 }
7897 return rcStrict;
7898}
7899
7900
7901/**
7902 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7903 */
7904HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7905{
7906 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7907
7908 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7909 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7910 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7911 AssertRCReturn(rc, rc);
7912
7913 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7914 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7915 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7916 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7917 {
7918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7919 rcStrict = VINF_SUCCESS;
7920 }
7921 return rcStrict;
7922}
7923
7924
7925/**
7926 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7927 */
7928HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7929{
7930 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7931
7932 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7933 if (EMAreHypercallInstructionsEnabled(pVCpu))
7934 {
7935 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7936 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7937 | CPUMCTX_EXTRN_RFLAGS
7938 | CPUMCTX_EXTRN_CR0
7939 | CPUMCTX_EXTRN_SS
7940 | CPUMCTX_EXTRN_CS
7941 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7942 AssertRCReturn(rc, rc);
7943
7944 /* Perform the hypercall. */
7945 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7946 if (rcStrict == VINF_SUCCESS)
7947 {
7948 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7949 AssertRCReturn(rc, rc);
7950 }
7951 else
7952 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7953 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7954 || RT_FAILURE(rcStrict));
7955
7956 /* If the hypercall changes anything other than guest's general-purpose registers,
7957 we would need to reload the guest changed bits here before VM-entry. */
7958 }
7959 else
7960 Log4Func(("Hypercalls not enabled\n"));
7961
7962 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7963 if (RT_FAILURE(rcStrict))
7964 {
7965 vmxHCSetPendingXcptUD(pVCpu);
7966 rcStrict = VINF_SUCCESS;
7967 }
7968
7969 return rcStrict;
7970}
7971
7972
7973/**
7974 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7975 */
7976HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7977{
7978 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7979#ifndef IN_NEM_DARWIN
7980 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7981#endif
7982
7983 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7984 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7985 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7986 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7987 AssertRCReturn(rc, rc);
7988
7989 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7990
7991 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7992 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7993 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7994 {
7995 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7996 rcStrict = VINF_SUCCESS;
7997 }
7998 else
7999 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8000 VBOXSTRICTRC_VAL(rcStrict)));
8001 return rcStrict;
8002}
8003
8004
8005/**
8006 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8007 */
8008HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8009{
8010 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8011
8012 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8013 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8014 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8015 AssertRCReturn(rc, rc);
8016
8017 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8018 if (rcStrict == VINF_SUCCESS)
8019 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8020 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8021 {
8022 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8023 rcStrict = VINF_SUCCESS;
8024 }
8025
8026 return rcStrict;
8027}
8028
8029
8030/**
8031 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8032 */
8033HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8034{
8035 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8036
8037 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8038 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8039 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8040 AssertRCReturn(rc, rc);
8041
8042 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8043 if (RT_SUCCESS(rcStrict))
8044 {
8045 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8046 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8047 rcStrict = VINF_SUCCESS;
8048 }
8049
8050 return rcStrict;
8051}
8052
8053
8054/**
8055 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8056 * VM-exit.
8057 */
8058HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8059{
8060 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8061 return VINF_EM_RESET;
8062}
8063
8064
8065/**
8066 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8067 */
8068HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8069{
8070 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8071
8072 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8073 AssertRCReturn(rc, rc);
8074
8075 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8076 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8077 rc = VINF_SUCCESS;
8078 else
8079 rc = VINF_EM_HALT;
8080
8081 if (rc != VINF_SUCCESS)
8082 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8083 return rc;
8084}
8085
8086
8087#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8088/**
8089 * VM-exit handler for instructions that result in a \#UD exception delivered to
8090 * the guest.
8091 */
8092HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8093{
8094 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8095 vmxHCSetPendingXcptUD(pVCpu);
8096 return VINF_SUCCESS;
8097}
8098#endif
8099
8100
8101/**
8102 * VM-exit handler for expiry of the VMX-preemption timer.
8103 */
8104HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8105{
8106 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8107
8108 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8109 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8110    Log12(("vmxHCExitPreemptTimer:\n"));
8111
8112 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8113 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8114 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8115 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8116 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8117}
8118
8119
8120/**
8121 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8122 */
8123HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8124{
8125 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8126
8127 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8128 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8129 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8130 AssertRCReturn(rc, rc);
8131
8132 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8133 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8134 : HM_CHANGED_RAISED_XCPT_MASK);
8135
8136#ifndef IN_NEM_DARWIN
8137 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8138 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8139 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8140 {
8141 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8142 hmR0VmxUpdateStartVmFunction(pVCpu);
8143 }
8144#endif
8145
8146 return rcStrict;
8147}
8148
8149
8150/**
8151 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8152 */
8153HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8154{
8155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8156
8157    /** @todo Enable the new code after finding a reliable guest test-case. */
8158#if 1
8159 return VERR_EM_INTERPRETER;
8160#else
8161 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8162 | HMVMX_READ_EXIT_INSTR_INFO
8163 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8164 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8165 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8166 AssertRCReturn(rc, rc);
8167
8168 /* Paranoia. Ensure this has a memory operand. */
8169 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8170
8171 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8172 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8173 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8174 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8175
8176 RTGCPTR GCPtrDesc;
8177 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8178
8179 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8180 GCPtrDesc, uType);
8181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8182 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8183 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8184 {
8185 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8186 rcStrict = VINF_SUCCESS;
8187 }
8188 return rcStrict;
8189#endif
8190}
8191
8192
8193/**
8194 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8195 * VM-exit.
8196 */
8197HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8198{
8199 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8200 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8201 AssertRCReturn(rc, rc);
8202
8203 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8204 if (RT_FAILURE(rc))
8205 return rc;
8206
8207 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8208 NOREF(uInvalidReason);
8209
8210#ifdef VBOX_STRICT
8211 uint32_t fIntrState;
8212 uint64_t u64Val;
8213 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8214 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8215 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8216
8217 Log4(("uInvalidReason %u\n", uInvalidReason));
8218 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8219 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8220 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8221
8222 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8223 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8224 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8225 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8226 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8227 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8228 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8229    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8230 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8231 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8232 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8233 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8234# ifndef IN_NEM_DARWIN
8235 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8236 {
8237 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8238 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8239 }
8240
8241 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8242# endif
8243#endif
8244
8245 return VERR_VMX_INVALID_GUEST_STATE;
8246}
8247
8248/**
8249 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8250 */
8251HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8252{
8253 /*
8254 * Cumulative notes of all recognized but unexpected VM-exits.
8255 *
8256 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8257 * nested-paging is used.
8258 *
8259     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8260     *    emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8261     *    this function (and thereby stopping VM execution) for handling such instructions.
8262 *
8263 *
8264 * VMX_EXIT_INIT_SIGNAL:
8265 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8266 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8267     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8268 *
8269     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8270 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8271     *    See Intel spec. 23.8 "Restrictions on VMX operation".
8272 *
8273 * VMX_EXIT_SIPI:
8274 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8275 * activity state is used. We don't make use of it as our guests don't have direct
8276 * access to the host local APIC.
8277 *
8278 * See Intel spec. 25.3 "Other Causes of VM-exits".
8279 *
8280 * VMX_EXIT_IO_SMI:
8281 * VMX_EXIT_SMI:
8282 * This can only happen if we support dual-monitor treatment of SMI, which can be
8283 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8284 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8285 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8286 *
8287 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8288 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8289 *
8290 * VMX_EXIT_ERR_MSR_LOAD:
8291     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8292     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8293     *    execution.
8294 *
8295 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8296 *
8297 * VMX_EXIT_ERR_MACHINE_CHECK:
8298     *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8299     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8300     *    abort-class #MC exception is raised. We thus cannot assume a
8301     *    reasonable chance of continuing any sort of execution and we bail.
8302 *
8303 * See Intel spec. 15.1 "Machine-check Architecture".
8304 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8305 *
8306 * VMX_EXIT_PML_FULL:
8307 * VMX_EXIT_VIRTUALIZED_EOI:
8308 * VMX_EXIT_APIC_WRITE:
8309 * We do not currently support any of these features and thus they are all unexpected
8310 * VM-exits.
8311 *
8312 * VMX_EXIT_GDTR_IDTR_ACCESS:
8313 * VMX_EXIT_LDTR_TR_ACCESS:
8314 * VMX_EXIT_RDRAND:
8315 * VMX_EXIT_RSM:
8316 * VMX_EXIT_VMFUNC:
8317 * VMX_EXIT_ENCLS:
8318 * VMX_EXIT_RDSEED:
8319 * VMX_EXIT_XSAVES:
8320 * VMX_EXIT_XRSTORS:
8321 * VMX_EXIT_UMWAIT:
8322 * VMX_EXIT_TPAUSE:
8323 * VMX_EXIT_LOADIWKEY:
8324 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8325     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8326 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8327 *
8328 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8329 */
8330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8331 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8332 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8333}
8334
8335
8336/**
8337 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8338 */
8339HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8340{
8341 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8342
8343 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8344
8345 /** @todo Optimize this: We currently drag in the whole MSR state
8346 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8347 * MSRs required. That would require changes to IEM and possibly CPUM too.
8348     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8349 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8350 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8351 int rc;
8352 switch (idMsr)
8353 {
8354 default:
8355 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8356 __FUNCTION__);
8357 AssertRCReturn(rc, rc);
8358 break;
8359 case MSR_K8_FS_BASE:
8360 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8361 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8362 AssertRCReturn(rc, rc);
8363 break;
8364 case MSR_K8_GS_BASE:
8365 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8366 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8367 AssertRCReturn(rc, rc);
8368 break;
8369 }
8370
8371 Log4Func(("ecx=%#RX32\n", idMsr));
8372
8373#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8374 Assert(!pVmxTransient->fIsNestedGuest);
8375 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8376 {
8377 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8378 && idMsr != MSR_K6_EFER)
8379 {
8380 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8381 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8382 }
8383 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8384 {
8385 Assert(pVmcsInfo->pvMsrBitmap);
8386 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8387 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8388 {
8389 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8390 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8391 }
8392 }
8393 }
8394#endif
8395
8396 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8397 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8398 if (rcStrict == VINF_SUCCESS)
8399 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8400 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8401 {
8402 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8403 rcStrict = VINF_SUCCESS;
8404 }
8405 else
8406 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8407 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8408
8409 return rcStrict;
8410}
8411
8412
8413/**
8414 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8415 */
8416HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8417{
8418 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8419
8420 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8421
8422 /*
8423     * The FS and GS base MSRs are not part of the all-MSRs mask (CPUMCTX_EXTRN_ALL_MSRS) used below.
8424     * Although we don't need to fetch the base (it will be overwritten by this WRMSR shortly),
8425     * loading the guest state would also load the entire segment register, including its limit
8426     * and attributes, so we need to import the full FS/GS segment here.
8427 */
8428 /** @todo Optimize this: We currently drag in the whole MSR state
8429 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8430 * MSRs required. That would require changes to IEM and possibly CPUM too.
8431     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8432 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8433 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8434 int rc;
8435 switch (idMsr)
8436 {
8437 default:
8438 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8439 __FUNCTION__);
8440 AssertRCReturn(rc, rc);
8441 break;
8442
8443 case MSR_K8_FS_BASE:
8444 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8445 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8446 AssertRCReturn(rc, rc);
8447 break;
8448 case MSR_K8_GS_BASE:
8449 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8450 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8451 AssertRCReturn(rc, rc);
8452 break;
8453 }
8454 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8455
8456 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8457 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8458
8459 if (rcStrict == VINF_SUCCESS)
8460 {
8461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8462
8463 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8464 if ( idMsr == MSR_IA32_APICBASE
8465 || ( idMsr >= MSR_IA32_X2APIC_START
8466 && idMsr <= MSR_IA32_X2APIC_END))
8467 {
8468 /*
8469 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8470 * When full APIC register virtualization is implemented we'll have to make
8471 * sure APIC state is saved from the VMCS before IEM changes it.
8472 */
8473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8474 }
8475 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8476 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
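        /* Note: clearing this transient flag forces the TSC offsetting (and, when in use, the
           VMX-preemption timer deadline) to be re-evaluated before the next VM-entry, since the
           guest has just changed its TSC. */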
8477 else if (idMsr == MSR_K6_EFER)
8478 {
8479 /*
8480 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8481 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8482 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8483 */
8484 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8485 }
8486
8487 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8488 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8489 {
8490 switch (idMsr)
8491 {
8492 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8493 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8494 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8495 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8496 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8497 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8498 default:
8499 {
8500#ifndef IN_NEM_DARWIN
8501 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8502 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8503 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8504 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8505#else
8506 AssertMsgFailed(("TODO\n"));
8507#endif
8508 break;
8509 }
8510 }
8511 }
8512#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8513 else
8514 {
8515 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8516 switch (idMsr)
8517 {
8518 case MSR_IA32_SYSENTER_CS:
8519 case MSR_IA32_SYSENTER_EIP:
8520 case MSR_IA32_SYSENTER_ESP:
8521 case MSR_K8_FS_BASE:
8522 case MSR_K8_GS_BASE:
8523 {
8524 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8525 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8526 }
8527
8528             /* Writes to MSRs in the auto-load/store area or to swapped (lazy-restore) MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
8529 default:
8530 {
8531 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8532 {
8533 /* EFER MSR writes are always intercepted. */
8534 if (idMsr != MSR_K6_EFER)
8535 {
8536 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8537 idMsr));
8538 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8539 }
8540 }
8541
8542 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8543 {
8544 Assert(pVmcsInfo->pvMsrBitmap);
8545 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8546 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8547 {
8548 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8549 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8550 }
8551 }
8552 break;
8553 }
8554 }
8555 }
8556#endif /* VBOX_STRICT */
8557 }
8558 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8559 {
8560 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8561 rcStrict = VINF_SUCCESS;
8562 }
8563 else
8564 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8565 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8566
8567 return rcStrict;
8568}
8569
8570
8571/**
8572 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8573 */
8574HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8575{
8576 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8577
8578 /** @todo The guest has likely hit a contended spinlock. We might want to
8579      *        poke or schedule a different guest VCPU. */
8580 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8581 if (RT_SUCCESS(rc))
8582 return VINF_EM_RAW_INTERRUPT;
8583
8584 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8585 return rc;
8586}
8587
8588
8589/**
8590 * VM-exit handler for when the TPR value is lowered below the specified
8591 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8592 */
8593HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8594{
8595 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8596 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8597
8598 /*
8599 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8600 * We'll re-evaluate pending interrupts and inject them before the next VM
8601 * entry so we can just continue execution here.
8602 */
8603 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8604 return VINF_SUCCESS;
8605}
8606
8607
8608/**
8609 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8610 * VM-exit.
8611 *
8612 * @retval VINF_SUCCESS when guest execution can continue.
8613 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8614 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8615 * incompatible guest state for VMX execution (real-on-v86 case).
8616 */
8617HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8618{
8619 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8620 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8621
8622 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8623 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8624 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8625
8626 VBOXSTRICTRC rcStrict;
8627 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8628 uint64_t const uExitQual = pVmxTransient->uExitQual;
8629 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8630 switch (uAccessType)
8631 {
8632 /*
8633 * MOV to CRx.
8634 */
8635 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8636 {
8637 /*
8638 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8639              * changes certain bits in CR0 and CR4 (and not just CR3). We are currently fine
8640 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8641 * PAE PDPTEs as well.
8642 */
8643 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8644 AssertRCReturn(rc, rc);
8645
8646 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8647#ifndef IN_NEM_DARWIN
8648 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8649#endif
8650 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8651 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8652
8653 /*
8654              * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8655 * - When nested paging isn't used.
8656 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8657 * - We are executing in the VM debug loop.
8658 */
8659#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8660# ifndef IN_NEM_DARWIN
8661 Assert( iCrReg != 3
8662 || !VM_IS_VMX_NESTED_PAGING(pVM)
8663 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8664 || pVCpu->hmr0.s.fUsingDebugLoop);
8665# else
8666 Assert( iCrReg != 3
8667 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8668# endif
8669#endif
8670
8671 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8672 Assert( iCrReg != 8
8673 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8674
8675 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8676 AssertMsg( rcStrict == VINF_SUCCESS
8677 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8678
8679#ifndef IN_NEM_DARWIN
8680 /*
8681 * This is a kludge for handling switches back to real mode when we try to use
8682              * V86 mode to run real mode code directly. The problem is that V86 mode cannot
8683              * deal with special selector values, so we have to return to ring-3 and run
8684              * there until the selector values are V86 mode compatible.
8685 *
8686 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8687 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8688 * this function.
8689 */
8690 if ( iCrReg == 0
8691 && rcStrict == VINF_SUCCESS
8692 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8693 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8694 && (uOldCr0 & X86_CR0_PE)
8695 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8696 {
8697 /** @todo Check selectors rather than returning all the time. */
8698 Assert(!pVmxTransient->fIsNestedGuest);
8699 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8700 rcStrict = VINF_EM_RESCHEDULE_REM;
8701 }
8702#endif
8703
8704 break;
8705 }
8706
8707 /*
8708 * MOV from CRx.
8709 */
8710 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8711 {
8712 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8713 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8714
8715 /*
8716              * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8717 * - When nested paging isn't used.
8718 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8719 * - We are executing in the VM debug loop.
8720 */
8721#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8722# ifndef IN_NEM_DARWIN
8723 Assert( iCrReg != 3
8724 || !VM_IS_VMX_NESTED_PAGING(pVM)
8725 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8726 || pVCpu->hmr0.s.fLeaveDone);
8727# else
8728 Assert( iCrReg != 3
8729 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8730# endif
8731#endif
8732
8733 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8734 Assert( iCrReg != 8
8735 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8736
8737 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8738 break;
8739 }
8740
8741 /*
8742 * CLTS (Clear Task-Switch Flag in CR0).
8743 */
8744 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8745 {
8746 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8747 break;
8748 }
8749
8750 /*
8751 * LMSW (Load Machine-Status Word into CR0).
8752 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8753 */
8754 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8755 {
8756 RTGCPTR GCPtrEffDst;
8757 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8758 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8759 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8760 if (fMemOperand)
8761 {
8762 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8763 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8764 }
8765 else
8766 GCPtrEffDst = NIL_RTGCPTR;
8767 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8768 break;
8769 }
8770
8771 default:
8772 {
8773 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8774 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8775 }
8776 }
8777
8778 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8779 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8780 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8781
8782 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8783 NOREF(pVM);
8784 return rcStrict;
8785}
8786
8787
8788/**
8789 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8790 * VM-exit.
8791 */
8792HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8793{
8794 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8795 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8796
8797 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8798 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8799 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8800 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8801#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8802     /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8803 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8804 AssertRCReturn(rc, rc);
8805
8806 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8807 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8808 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8809 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8810 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8811 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8812 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8813 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
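    /* For reference, a rough sketch of the exit-qualification bits consumed above (paraphrasing
       the Intel spec table referred to above; the VMX_EXIT_QUAL_IO_XXX macros are authoritative):
         bits  2:0  - size of access (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes),
         bit     3  - direction (0 = OUT, 1 = IN),
         bit     4  - string instruction (INS/OUTS),
         bit     5  - REP prefixed,
         bit     6  - operand encoding (0 = DX, 1 = immediate),
         bits 31:16 - port number. */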
8814
8815 /*
8816 * Update exit history to see if this exit can be optimized.
8817 */
8818 VBOXSTRICTRC rcStrict;
8819 PCEMEXITREC pExitRec = NULL;
8820 if ( !fGstStepping
8821 && !fDbgStepping)
8822 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8823 !fIOString
8824 ? !fIOWrite
8825 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8826 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8827 : !fIOWrite
8828 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8829 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8830 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8831 if (!pExitRec)
8832 {
8833 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8834 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
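        /* Index 2 is unused: a size-of-access value of 2 (3-byte access) is invalid and is
           rejected by the AssertReturn on uIOSize above, hence the zero entries. */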
8835
8836 uint32_t const cbValue = s_aIOSizes[uIOSize];
8837 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8838 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8839 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8840 if (fIOString)
8841 {
8842 /*
8843 * INS/OUTS - I/O String instruction.
8844 *
8845 * Use instruction-information if available, otherwise fall back on
8846 * interpreting the instruction.
8847 */
8848 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8849 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8850 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8851 if (fInsOutsInfo)
8852 {
8853 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8854 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8855 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8856 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8857 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8858 if (fIOWrite)
8859 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8860 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8861 else
8862 {
8863 /*
8864 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8865                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8866 * See Intel Instruction spec. for "INS".
8867 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8868 */
8869 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8870 }
8871 }
8872 else
8873 rcStrict = IEMExecOne(pVCpu);
8874
8875 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8876 fUpdateRipAlready = true;
8877 }
8878 else
8879 {
8880 /*
8881 * IN/OUT - I/O instruction.
8882 */
8883 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8884 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8885 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8886 if (fIOWrite)
8887 {
8888 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8889 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8890#ifndef IN_NEM_DARWIN
8891 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8892 && !pCtx->eflags.Bits.u1TF)
8893 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8894#endif
8895 }
8896 else
8897 {
8898 uint32_t u32Result = 0;
8899 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8900 if (IOM_SUCCESS(rcStrict))
8901 {
8902 /* Save result of I/O IN instr. in AL/AX/EAX. */
8903 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
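                    /* E.g. for a 1-byte IN, uAndVal is 0xff, so only AL is replaced while the
                       upper 24 bits of EAX are preserved. */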
8904 }
8905#ifndef IN_NEM_DARWIN
8906 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8907 && !pCtx->eflags.Bits.u1TF)
8908 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8909#endif
8910 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8911 }
8912 }
8913
8914 if (IOM_SUCCESS(rcStrict))
8915 {
8916 if (!fUpdateRipAlready)
8917 {
8918 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8919 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8920 }
8921
8922 /*
8923              * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8924              * guru meditation while booting a Fedora 17 64-bit guest.
8925 *
8926 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8927 */
8928 if (fIOString)
8929 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8930
8931 /*
8932 * If any I/O breakpoints are armed, we need to check if one triggered
8933 * and take appropriate action.
8934 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8935 */
8936#if 1
8937 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8938#else
8939 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8940 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8941 AssertRCReturn(rc, rc);
8942#endif
8943
8944 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8945 * execution engines about whether hyper BPs and such are pending. */
8946 uint32_t const uDr7 = pCtx->dr[7];
8947 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8948 && X86_DR7_ANY_RW_IO(uDr7)
8949 && (pCtx->cr4 & X86_CR4_DE))
8950 || DBGFBpIsHwIoArmed(pVM)))
8951 {
8952 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8953
8954#ifndef IN_NEM_DARWIN
8955 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8956 VMMRZCallRing3Disable(pVCpu);
8957 HM_DISABLE_PREEMPT(pVCpu);
8958
8959 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8960
8961 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8962 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8963 {
8964 /* Raise #DB. */
8965 if (fIsGuestDbgActive)
8966 ASMSetDR6(pCtx->dr[6]);
8967 if (pCtx->dr[7] != uDr7)
8968 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8969
8970 vmxHCSetPendingXcptDB(pVCpu);
8971 }
8972 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8973 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8974 else if ( rcStrict2 != VINF_SUCCESS
8975 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8976 rcStrict = rcStrict2;
8977 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8978
8979 HM_RESTORE_PREEMPT();
8980 VMMRZCallRing3Enable(pVCpu);
8981#else
8982 /** @todo */
8983#endif
8984 }
8985 }
8986
8987#ifdef VBOX_STRICT
8988 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8989 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8990 Assert(!fIOWrite);
8991 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8992 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8993 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8994 Assert(fIOWrite);
8995 else
8996 {
8997# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8998 * statuses, that the VMM device and some others may return. See
8999 * IOM_SUCCESS() for guidance. */
9000 AssertMsg( RT_FAILURE(rcStrict)
9001 || rcStrict == VINF_SUCCESS
9002 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9003 || rcStrict == VINF_EM_DBG_BREAKPOINT
9004 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9005 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9006# endif
9007 }
9008#endif
9009 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9010 }
9011 else
9012 {
9013 /*
9014 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9015 */
9016 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9017 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9018 AssertRCReturn(rc2, rc2);
9019 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9020 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9021 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9022 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9023 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9024 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9025
9026 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9027 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9028
9029 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9030 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9031 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9032 }
9033 return rcStrict;
9034}
9035
9036
9037/**
9038 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9039 * VM-exit.
9040 */
9041HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9042{
9043 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9044
9045     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9046 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9047 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9048 {
9049 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9050 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9051 {
9052 uint32_t uErrCode;
9053 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9054 {
9055 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9056 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9057 }
9058 else
9059 uErrCode = 0;
9060
9061 RTGCUINTPTR GCPtrFaultAddress;
9062 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9063 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9064 else
9065 GCPtrFaultAddress = 0;
9066
9067 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9068
9069 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9070 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9071
9072 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9073 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9074 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9075 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9076 }
9077 }
9078
9079 /* Fall back to the interpreter to emulate the task-switch. */
9080 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9081 return VERR_EM_INTERPRETER;
9082}
9083
9084
9085/**
9086 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9087 */
9088HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9089{
9090 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9091
9092 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9093 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9094 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9095 AssertRC(rc);
9096 return VINF_EM_DBG_STEPPED;
9097}
9098
9099
9100/**
9101 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9102 */
9103HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9104{
9105 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9106 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9107
9108 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9109 | HMVMX_READ_EXIT_INSTR_LEN
9110 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9111 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9112 | HMVMX_READ_IDT_VECTORING_INFO
9113 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9114
9115 /*
9116 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9117 */
9118 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9119 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9120 {
9121 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9122 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9123 {
9124 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9125 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9126 }
9127 }
9128 else
9129 {
9130 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9131 return rcStrict;
9132 }
9133
9134     /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9135 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9136 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9137 AssertRCReturn(rc, rc);
9138
9139     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9140 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9141 switch (uAccessType)
9142 {
9143#ifndef IN_NEM_DARWIN
9144 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9145 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9146 {
9147 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9148 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9149 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9150
9151 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9152 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9153 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
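            /* I.e. the faulting guest-physical address is the APIC-access page (the page part of
               the guest's APIC-base MSR) plus the page offset reported in the exit qualification. */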
9154 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9155 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9156
9157 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9158 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9159 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9160 if ( rcStrict == VINF_SUCCESS
9161 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9162 || rcStrict == VERR_PAGE_NOT_PRESENT)
9163 {
9164 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9165 | HM_CHANGED_GUEST_APIC_TPR);
9166 rcStrict = VINF_SUCCESS;
9167 }
9168 break;
9169 }
9170#else
9171 /** @todo */
9172#endif
9173
9174 default:
9175 {
9176 Log4Func(("uAccessType=%#x\n", uAccessType));
9177 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9178 break;
9179 }
9180 }
9181
9182 if (rcStrict != VINF_SUCCESS)
9183 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9184 return rcStrict;
9185}
9186
9187
9188/**
9189 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9190 * VM-exit.
9191 */
9192HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9193{
9194 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9195 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9196
9197 /*
9198 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9199 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9200 * must emulate the MOV DRx access.
9201 */
9202 if (!pVmxTransient->fIsNestedGuest)
9203 {
9204 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9205 if ( pVmxTransient->fWasGuestDebugStateActive
9206#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9207 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9208#endif
9209 )
9210 {
9211 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9212 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9213 }
9214
9215 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9216 && !pVmxTransient->fWasHyperDebugStateActive)
9217 {
9218 Assert(!DBGFIsStepping(pVCpu));
9219 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9220
9221 /* Whether we disable intercepting MOV DRx instructions and resume
9222 the current one, or emulate it and keep intercepting them is
9223 configurable. Though it usually comes down to whether there are
9224 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9225#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9226 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9227#else
9228 bool const fResumeInstruction = true;
9229#endif
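            /* fResumeInstruction == true: stop intercepting MOV DRx and let the guest re-execute
               the instruction with its own debug registers loaded; false: keep the intercept and
               emulate each access below (used when DR6/DR7 contents must be filtered for the guest,
               see the comment above). */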
9230 if (fResumeInstruction)
9231 {
9232 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9233 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9234 AssertRC(rc);
9235 }
9236
9237#ifndef IN_NEM_DARWIN
9238 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9239 VMMRZCallRing3Disable(pVCpu);
9240 HM_DISABLE_PREEMPT(pVCpu);
9241
9242 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9243 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9244 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9245
9246 HM_RESTORE_PREEMPT();
9247 VMMRZCallRing3Enable(pVCpu);
9248#else
9249 CPUMR3NemActivateGuestDebugState(pVCpu);
9250 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9251 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9252#endif
9253
9254 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9255 if (fResumeInstruction)
9256 {
9257#ifdef VBOX_WITH_STATISTICS
9258 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9259 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9260 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9261 else
9262 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9263#endif
9264 return VINF_SUCCESS;
9265 }
9266 }
9267 }
9268
9269 /*
9270 * Import state. We must have DR7 loaded here as it's always consulted,
9271 * both for reading and writing. The other debug registers are never
9272 * exported as such.
9273 */
9274 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9275 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9276 | CPUMCTX_EXTRN_GPRS_MASK
9277 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9278 AssertRCReturn(rc, rc);
9279
9280 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9281 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9282 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9283 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9284
9285 VBOXSTRICTRC rcStrict;
9286 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9287 {
9288 /*
9289 * Write DRx register.
9290 */
9291 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9292 AssertMsg( rcStrict == VINF_SUCCESS
9293 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9294
9295 if (rcStrict == VINF_SUCCESS)
9296 {
9297 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9298 * kept it for now to avoid breaking something non-obvious. */
9299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9300 | HM_CHANGED_GUEST_DR7);
9301 /* Update the DR6 register if guest debug state is active, otherwise we'll
9302 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9303 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9304 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9305 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9306 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9307 }
9308 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9309 {
9310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9311 rcStrict = VINF_SUCCESS;
9312 }
9313
9314 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9315 }
9316 else
9317 {
9318 /*
9319 * Read DRx register into a general purpose register.
9320 */
9321 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9322 AssertMsg( rcStrict == VINF_SUCCESS
9323 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9324
9325 if (rcStrict == VINF_SUCCESS)
9326 {
9327 if (iGReg == X86_GREG_xSP)
9328 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9329 | HM_CHANGED_GUEST_RSP);
9330 else
9331 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9332 }
9333 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9334 {
9335 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9336 rcStrict = VINF_SUCCESS;
9337 }
9338
9339 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9340 }
9341
9342 return rcStrict;
9343}
9344
9345
9346/**
9347 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9348 * Conditional VM-exit.
9349 */
9350HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9351{
9352 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9353
9354#ifndef IN_NEM_DARWIN
9355 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9356
9357 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9358 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9359 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9360 | HMVMX_READ_IDT_VECTORING_INFO
9361 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9362 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9363
9364 /*
9365 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9366 */
9367 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9368 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9369 {
9370 /*
9371 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9372 * instruction emulation to inject the original event. Otherwise, injecting the original event
9373 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9374 */
9375 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9376 { /* likely */ }
9377 else
9378 {
9379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9380# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9381 /** @todo NSTVMX: Think about how this should be handled. */
9382 if (pVmxTransient->fIsNestedGuest)
9383 return VERR_VMX_IPE_3;
9384# endif
9385 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9386 }
9387 }
9388 else
9389 {
9390 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9391 return rcStrict;
9392 }
9393
9394 /*
9395 * Get sufficient state and update the exit history entry.
9396 */
9397 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9398 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9399 AssertRCReturn(rc, rc);
9400
9401 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9402 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9403 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9404 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9405 if (!pExitRec)
9406 {
9407 /*
9408 * If we succeed, resume guest execution.
9409          * If we fail to interpret the instruction because we couldn't get the guest-physical address
9410          * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9411          * in the host TLB), resume execution, which will cause a guest page fault and let the guest handle this
9412          * weird case. See @bugref{6043}.
9413 */
9414 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9415/** @todo bird: We can probably just go straight to IOM here and assume that
9416 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9417  * well. However, we need to address the aliasing workarounds that
9418 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9419 *
9420 * Might also be interesting to see if we can get this done more or
9421 * less locklessly inside IOM. Need to consider the lookup table
9422 * updating and use a bit more carefully first (or do all updates via
9423 * rendezvous) */
9424 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9425 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9426 if ( rcStrict == VINF_SUCCESS
9427 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9428 || rcStrict == VERR_PAGE_NOT_PRESENT)
9429 {
9430 /* Successfully handled MMIO operation. */
9431 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9432 | HM_CHANGED_GUEST_APIC_TPR);
9433 rcStrict = VINF_SUCCESS;
9434 }
9435 }
9436 else
9437 {
9438 /*
9439 * Frequent exit or something needing probing. Call EMHistoryExec.
9440 */
9441 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9442 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9443
9444 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9445 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9446
9447 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9448 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9449 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9450 }
9451 return rcStrict;
9452#else
9453 AssertFailed();
9454 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9455#endif
9456}
9457
9458
9459/**
9460 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9461 * VM-exit.
9462 */
9463HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9464{
9465 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9466#ifndef IN_NEM_DARWIN
9467 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9468
9469 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9470 | HMVMX_READ_EXIT_INSTR_LEN
9471 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9472 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9473 | HMVMX_READ_IDT_VECTORING_INFO
9474 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9475 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9476
9477 /*
9478 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9479 */
9480 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9482 {
9483 /*
9484 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9485 * we shall resolve the nested #PF and re-inject the original event.
9486 */
9487 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9488 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9489 }
9490 else
9491 {
9492 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9493 return rcStrict;
9494 }
9495
9496 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9497 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9498 AssertRCReturn(rc, rc);
9499
9500 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9501 uint64_t const uExitQual = pVmxTransient->uExitQual;
9502 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9503
9504 RTGCUINT uErrorCode = 0;
9505 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9506 uErrorCode |= X86_TRAP_PF_ID;
9507 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9508 uErrorCode |= X86_TRAP_PF_RW;
9509 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9510 uErrorCode |= X86_TRAP_PF_P;
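    /* I.e. synthesize a #PF-style error code from the EPT violation qualification: instruction
       fetch -> ID, write access -> RW, and any EPT permission bit set -> the translation was
       present but the access wasn't permitted (P). PGM consumes this below as a nested #PF. */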
9511
9512 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9513 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9514
9515 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9516
9517 /*
9518 * Handle the pagefault trap for the nested shadow table.
9519 */
9520 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9521 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9522 TRPMResetTrap(pVCpu);
9523
9524 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9525 if ( rcStrict == VINF_SUCCESS
9526 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9527 || rcStrict == VERR_PAGE_NOT_PRESENT)
9528 {
9529 /* Successfully synced our nested page tables. */
9530 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9531 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9532 return VINF_SUCCESS;
9533 }
9534 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9535 return rcStrict;
9536
9537#else /* IN_NEM_DARWIN */
9538 PVM pVM = pVCpu->CTX_SUFF(pVM);
9539 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9540 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9541 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9542 vmxHCImportGuestRip(pVCpu);
9543 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9544
9545 /*
9546 * Ask PGM for information about the given GCPhys. We need to check if we're
9547 * out of sync first.
9548 */
9549 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9550 false,
9551 false };
9552 PGMPHYSNEMPAGEINFO Info;
9553 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9554 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9555 if (RT_SUCCESS(rc))
9556 {
9557 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9558 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9559 {
9560 if (State.fCanResume)
9561 {
9562 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9563 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9564 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9565 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9566 State.fDidSomething ? "" : " no-change"));
9567 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9568 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9569 return VINF_SUCCESS;
9570 }
9571 }
9572
9573 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9574 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9575 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9576 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9577 State.fDidSomething ? "" : " no-change"));
9578 }
9579 else
9580 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9581 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9582 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9583
9584 /*
9585 * Emulate the memory access, either access handler or special memory.
9586 */
9587 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9588 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9589 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9590 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9591 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9592
9593 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9594 AssertRCReturn(rc, rc);
9595
9596 VBOXSTRICTRC rcStrict;
9597 if (!pExitRec)
9598 rcStrict = IEMExecOne(pVCpu);
9599 else
9600 {
9601 /* Frequent access or probing. */
9602 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9603 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9604 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9605 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9606 }
9607
9608 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9609
9610 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9611 return rcStrict;
9612#endif /* IN_NEM_DARWIN */
9613}
9614
9615#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9616
9617/**
9618 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9619 */
9620HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9621{
9622 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9623
9624 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9625 | HMVMX_READ_EXIT_INSTR_INFO
9626 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9627 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9628 | CPUMCTX_EXTRN_SREG_MASK
9629 | CPUMCTX_EXTRN_HWVIRT
9630 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9631 AssertRCReturn(rc, rc);
9632
9633 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9634
9635 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9636 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9637
9638 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9639 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9640 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9641 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9642 {
9643 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9644 rcStrict = VINF_SUCCESS;
9645 }
9646 return rcStrict;
9647}
9648
9649
9650/**
9651 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9652 */
9653HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9654{
9655 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9656
9657 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9658 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9659 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9660 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9661 AssertRCReturn(rc, rc);
9662
9663 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9664
9665 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9666 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9667 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9668 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9669 {
9670 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9671 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9672 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9673 }
9674 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9675 return rcStrict;
9676}
9677
9678
9679/**
9680 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9681 */
9682HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9683{
9684 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9685
9686 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9687 | HMVMX_READ_EXIT_INSTR_INFO
9688 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9689 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9690 | CPUMCTX_EXTRN_SREG_MASK
9691 | CPUMCTX_EXTRN_HWVIRT
9692 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9693 AssertRCReturn(rc, rc);
9694
9695 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9696
9697 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9698 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9699
9700 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9701 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9702 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9703 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9704 {
9705 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9706 rcStrict = VINF_SUCCESS;
9707 }
9708 return rcStrict;
9709}
9710
9711
9712/**
9713 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9714 */
9715HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9716{
9717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9718
9719 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9720 | HMVMX_READ_EXIT_INSTR_INFO
9721 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9722 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9723 | CPUMCTX_EXTRN_SREG_MASK
9724 | CPUMCTX_EXTRN_HWVIRT
9725 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9726 AssertRCReturn(rc, rc);
9727
9728 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9729
9730 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9731 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9732
9733 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9734 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9735 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9736 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9737 {
9738 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9739 rcStrict = VINF_SUCCESS;
9740 }
9741 return rcStrict;
9742}
9743
9744
9745/**
9746 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9747 */
9748HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9749{
9750 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9751
9752 /*
9753      * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9754      * thus might not need to import the shadow VMCS state, but it's safer to do so in
9755      * case code elsewhere dares to look at unsynced VMCS fields.
9756 */
9757 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9758 | HMVMX_READ_EXIT_INSTR_INFO
9759 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9760 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9761 | CPUMCTX_EXTRN_SREG_MASK
9762 | CPUMCTX_EXTRN_HWVIRT
9763 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9764 AssertRCReturn(rc, rc);
9765
9766 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9767
9768 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9769 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9770 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9771
9772 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9773 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9774 {
9775 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9776
9777# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9778 /* Try for exit optimization. This is on the following instruction
9779 because it would be a waste of time to have to reinterpret the
9780        already decoded vmread instruction. */
9781 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9782 if (pExitRec)
9783 {
9784 /* Frequent access or probing. */
9785 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9786 AssertRCReturn(rc, rc);
9787
9788 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9789 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9790 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9791 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9792 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9793 }
9794# endif
9795 }
9796 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9797 {
9798 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9799 rcStrict = VINF_SUCCESS;
9800 }
9801 return rcStrict;
9802}
9803
9804
9805/**
9806 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9807 */
9808HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9809{
9810 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9811
9812 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9813 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9814 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9815 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9816 AssertRCReturn(rc, rc);
9817
9818 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9819
9820 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9821 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9822 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9823 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9824 {
9825 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9826 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9827 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9828 }
9829 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9830 return rcStrict;
9831}
9832
9833
9834/**
9835 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9836 */
9837HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9838{
9839 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9840
9841 /*
9842 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9843 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9844 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9845 */
9846 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9847 | HMVMX_READ_EXIT_INSTR_INFO
9848 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9849 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9850 | CPUMCTX_EXTRN_SREG_MASK
9851 | CPUMCTX_EXTRN_HWVIRT
9852 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9853 AssertRCReturn(rc, rc);
9854
9855 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9856
9857 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9858 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9859 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9860
9861 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9862 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9864 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9865 {
9866 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9867 rcStrict = VINF_SUCCESS;
9868 }
9869 return rcStrict;
9870}
9871
9872
9873/**
9874 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9875 */
9876HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9877{
9878 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9879
9880 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9881 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9882 | CPUMCTX_EXTRN_HWVIRT
9883 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9884 AssertRCReturn(rc, rc);
9885
9886 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9887
9888 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9889 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9891 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9892 {
9893 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9894 rcStrict = VINF_SUCCESS;
9895 }
9896 return rcStrict;
9897}
9898
9899
9900/**
9901 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9902 */
9903HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9904{
9905 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9906
9907 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9908 | HMVMX_READ_EXIT_INSTR_INFO
9909 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9910 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9911 | CPUMCTX_EXTRN_SREG_MASK
9912 | CPUMCTX_EXTRN_HWVIRT
9913 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9914 AssertRCReturn(rc, rc);
9915
9916 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9917
9918 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9919 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9920
9921 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9922 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9923 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9924 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9925 {
9926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9927 rcStrict = VINF_SUCCESS;
9928 }
9929 return rcStrict;
9930}
9931
9932
9933/**
9934 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9935 */
9936HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9937{
9938 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9939
9940 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9941 | HMVMX_READ_EXIT_INSTR_INFO
9942 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9943 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9944 | CPUMCTX_EXTRN_SREG_MASK
9945 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9946 AssertRCReturn(rc, rc);
9947
9948 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9949
9950 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9951 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9952
9953 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9954 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9955 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9956 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9957 {
9958 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9959 rcStrict = VINF_SUCCESS;
9960 }
9961 return rcStrict;
9962}
9963
9964
9965# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9966/**
9967 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9968 */
9969HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9970{
9971 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9972
9973 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9974 | HMVMX_READ_EXIT_INSTR_INFO
9975 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9976 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9977 | CPUMCTX_EXTRN_SREG_MASK
9978 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9979 AssertRCReturn(rc, rc);
9980
9981 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9982
9983 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9984 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9985
9986 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9987 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9988 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9989 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9990 {
9991 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9992 rcStrict = VINF_SUCCESS;
9993 }
9994 return rcStrict;
9995}
9996# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9997#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9998/** @} */
9999
10000
10001#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10002/** @name Nested-guest VM-exit handlers.
10003 * @{
10004 */
10005/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10006/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10007/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10008
10009/**
10010 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10011 * Conditional VM-exit.
10012 */
10013HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10014{
10015 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10016
10017 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10018
10019 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10020 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10021 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10022
10023 switch (uExitIntType)
10024 {
10025# ifndef IN_NEM_DARWIN
10026 /*
10027 * Physical NMIs:
10028          *    We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
10029 */
10030 case VMX_EXIT_INT_INFO_TYPE_NMI:
10031 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10032# endif
10033
10034 /*
10035 * Hardware exceptions,
10036 * Software exceptions,
10037 * Privileged software exceptions:
10038 * Figure out if the exception must be delivered to the guest or the nested-guest.
10039 */
10040 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10041 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10042 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10043 {
10044 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10045 | HMVMX_READ_EXIT_INSTR_LEN
10046 | HMVMX_READ_IDT_VECTORING_INFO
10047 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10048
10049 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10050 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10051 {
10052 /* Exit qualification is required for debug and page-fault exceptions. */
10053 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10054
10055 /*
10056 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10057 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10058 * length. However, if delivery of a software interrupt, software exception or privileged
10059 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10060 */
10061 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10062 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10063 pVmxTransient->uExitIntErrorCode,
10064 pVmxTransient->uIdtVectoringInfo,
10065 pVmxTransient->uIdtVectoringErrorCode);
10066#ifdef DEBUG_ramshankar
10067 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10068 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10069 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10070 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10071 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10072 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10073#endif
10074 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10075 }
10076
10077 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10078 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10079 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10080 }
10081
10082 /*
10083 * Software interrupts:
10084 * VM-exits cannot be caused by software interrupts.
10085 *
10086 * External interrupts:
10087 * This should only happen when "acknowledge external interrupts on VM-exit"
10088 * control is set. However, we never set this when executing a guest or
10089 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10090 * the guest.
10091 */
10092 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10093 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10094 default:
10095 {
10096 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10097 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10098 }
10099 }
10100}
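
/*
 * For reference, a rough sketch of the architectural check CPUMIsGuestVmxXcptInterceptSet
 * performs above when deciding whether an exception belongs to the nested hypervisor:
 * the vector is looked up in the nested VMCS exception bitmap, and #PF is additionally
 * qualified by the page-fault error-code mask/match controls.  The helper and parameter
 * names below are purely illustrative; the real implementation lives in CPUM.
 *
 *   static bool vmxSketchIsXcptIntercepted(uint32_t fXcptBitmap, uint32_t uPFMask, uint32_t uPFMatch,
 *                                          uint8_t uVector, uint32_t uErrCode)
 *   {
 *       bool const fBitSet = RT_BOOL(fXcptBitmap & RT_BIT_32(uVector));
 *       if (uVector != X86_XCPT_PF)
 *           return fBitSet;
 *       // #PF: bit 14 of the bitmap is combined with the error-code mask/match pair.
 *       bool const fMatch = (uErrCode & uPFMask) == uPFMatch;
 *       return fBitSet == fMatch;
 *   }
 */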
10101
10102
10103/**
10104 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10105 * Unconditional VM-exit.
10106 */
10107HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10108{
10109 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10110 return IEMExecVmxVmexitTripleFault(pVCpu);
10111}
10112
10113
10114/**
10115 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10116 */
10117HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10118{
10119 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10120
10121 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10122 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10123 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10124}
10125
10126
10127/**
10128 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10129 */
10130HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10131{
10132 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10133
10134 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10135 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10136 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10137}
10138
10139
10140/**
10141 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10142 * Unconditional VM-exit.
10143 */
10144HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10145{
10146 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10147
10148 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10149 | HMVMX_READ_EXIT_INSTR_LEN
10150 | HMVMX_READ_IDT_VECTORING_INFO
10151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10152
10153 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10154 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10155 pVmxTransient->uIdtVectoringErrorCode);
10156 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10157}
10158
10159
10160/**
10161 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10162 */
10163HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10164{
10165 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10166
10167 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10168 {
10169 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10170 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10171 }
10172 return vmxHCExitHlt(pVCpu, pVmxTransient);
10173}
10174
10175
10176/**
10177 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10178 */
10179HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10180{
10181 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10182
10183 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10184 {
10185 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10186 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10187 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10188 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10189 }
10190 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10191}
10192
10193
10194/**
10195 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10196 */
10197HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10198{
10199 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10200
10201 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10202 {
10203 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10204 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10205 }
10206 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10207}
10208
10209
10210/**
10211 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10212 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10213 */
10214HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10215{
10216 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10217
10218 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10219 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10220
10221 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10222
10223 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10224 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10225 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10226
10227 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10228 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10229 u64VmcsField &= UINT64_C(0xffffffff);
10230
10231 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10232 {
10233 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10234 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10235 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10236 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10237 }
10238
10239 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10240 return vmxHCExitVmread(pVCpu, pVmxTransient);
10241 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10242}
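
/*
 * A minimal sketch of the architectural VMREAD/VMWRITE-bitmap lookup that
 * CPUMIsGuestVmxVmreadVmwriteInterceptSet models when the nested hypervisor has enabled
 * VMCS shadowing (without it, every VMREAD/VMWRITE causes a VM-exit): a field encoding
 * with any of bits 63:15 set always causes a VM-exit, otherwise bits 14:0 select a bit
 * in the 4KB VMREAD (or VMWRITE) bitmap.  Function and parameter names are illustrative
 * only; the real helper lives in CPUM.
 *
 *   static bool vmxSketchIsVmreadVmwriteIntercepted(uint8_t const *pbBitmap, uint64_t u64VmcsField)
 *   {
 *       if (u64VmcsField & UINT64_C(0xffffffffffff8000))        // Bits 63:15 set -> unconditional VM-exit.
 *           return true;
 *       uint32_t const iBit = (uint32_t)u64VmcsField & 0x7fff;  // Bits 14:0 index the 4KB bitmap.
 *       return RT_BOOL(pbBitmap[iBit >> 3] & RT_BIT_32(iBit & 7));
 *   }
 */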
10243
10244
10245/**
10246 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10247 */
10248HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10249{
10250 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10251
10252 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10253 {
10254 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10255 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10256 }
10257
10258 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10259}
10260
10261
10262/**
10263 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10264 * Conditional VM-exit.
10265 */
10266HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10267{
10268 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10269
10270 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10271 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10272
10273 VBOXSTRICTRC rcStrict;
10274 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10275 switch (uAccessType)
10276 {
10277 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10278 {
10279 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10280 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10281 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10282 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10283
10284 bool fIntercept;
10285 switch (iCrReg)
10286 {
10287 case 0:
10288 case 4:
10289 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10290 break;
10291
10292 case 3:
10293 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10294 break;
10295
10296 case 8:
10297 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10298 break;
10299
10300 default:
10301 fIntercept = false;
10302 break;
10303 }
10304 if (fIntercept)
10305 {
10306 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10307 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10308 }
10309 else
10310 {
10311 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10312 AssertRCReturn(rc, rc);
10313 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10314 }
10315 break;
10316 }
10317
10318 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10319 {
10320 /*
10321 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10322 * CR2 reads do not cause a VM-exit.
10323 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10324 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10325 */
10326 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10327 if ( iCrReg == 3
10328 || iCrReg == 8)
10329 {
10330 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10331 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10332 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10333 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10334 {
10335 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10336 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10337 }
10338 else
10339 {
10340 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10341 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10342 }
10343 }
10344 else
10345 {
10346 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10347 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10348 }
10349 break;
10350 }
10351
10352 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10353 {
10354 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10355 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10356 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10357 if ( (uGstHostMask & X86_CR0_TS)
10358 && (uReadShadow & X86_CR0_TS))
10359 {
10360 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10361 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10362 }
10363 else
10364 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10365 break;
10366 }
10367
10368 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10369 {
10370 RTGCPTR GCPtrEffDst;
10371 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10372 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10373 if (fMemOperand)
10374 {
10375 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10376 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10377 }
10378 else
10379 GCPtrEffDst = NIL_RTGCPTR;
10380
10381 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10382 {
10383 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10384 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10385 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10386 }
10387 else
10388 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10389 break;
10390 }
10391
10392 default:
10393 {
10394 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10395 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10396 }
10397 }
10398
10399 if (rcStrict == VINF_IEM_RAISED_XCPT)
10400 {
10401 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10402 rcStrict = VINF_SUCCESS;
10403 }
10404 return rcStrict;
10405}
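
/*
 * The MOV-to-CR0/CR4 cases above ultimately come down to the architectural guest/host
 * mask and read-shadow rule: the write is intercepted if it tries to change any bit the
 * nested hypervisor owns (a bit set in the guest/host mask) to a value different from
 * the corresponding read-shadow bit.  A minimal sketch of that core test; the real
 * CPUMIsGuestVmxMovToCr0Cr4InterceptSet may apply further conditions:
 *
 *   static bool vmxSketchIsMovToCrXIntercepted(uint64_t uGstHostMask, uint64_t uReadShadow, uint64_t uNewCrX)
 *   {
 *       return RT_BOOL((uNewCrX ^ uReadShadow) & uGstHostMask);
 *   }
 */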
10406
10407
10408/**
10409 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10410 * Conditional VM-exit.
10411 */
10412HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10413{
10414 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10415
10416 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10417 {
10418 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10419 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10420 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10421 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10422 }
10423 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10424}
10425
10426
10427/**
10428 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10429 * Conditional VM-exit.
10430 */
10431HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10432{
10433 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10434
10435 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10436
10437 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10438 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10439 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10440
10441 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10442 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10443 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10444 {
10445 /*
10446 * IN/OUT instruction:
10447 * - Provides VM-exit instruction length.
10448 *
10449 * INS/OUTS instruction:
10450 * - Provides VM-exit instruction length.
10451 * - Provides Guest-linear address.
10452 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10453 */
10454 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10455 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10456
10457         /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10458 pVmxTransient->ExitInstrInfo.u = 0;
10459 pVmxTransient->uGuestLinearAddr = 0;
10460
10461 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10462 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10463 if (fIOString)
10464 {
10465 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10466 if (fVmxInsOutsInfo)
10467 {
10468 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10469 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10470 }
10471 }
10472
10473 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10474 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10475 }
10476 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10477}
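
/*
 * CPUMIsGuestVmxIoInterceptSet above models the architectural I/O-bitmap check: when
 * "use I/O bitmaps" is active, every byte of the access is looked up in one of the two
 * 4KB bitmaps (A covers ports 0x0000..0x7fff, B covers 0x8000..0xffff) and any set bit
 * causes the VM-exit.  A simplified, purely illustrative sketch that ignores the
 * unconditional-I/O-exiting and port wrap-around corner cases:
 *
 *   static bool vmxSketchIsIoIntercepted(uint8_t const *pbIoBitmapA, uint8_t const *pbIoBitmapB,
 *                                        uint16_t uPort, uint8_t cbAccess)
 *   {
 *       for (uint8_t off = 0; off < cbAccess; off++)
 *       {
 *           uint16_t const uThisPort = uPort + off;
 *           uint8_t const *pbBitmap  = uThisPort < 0x8000 ? pbIoBitmapA : pbIoBitmapB;
 *           uint16_t const iBit      = uThisPort & 0x7fff;
 *           if (pbBitmap[iBit >> 3] & RT_BIT_32(iBit & 7))
 *               return true;
 *       }
 *       return false;
 *   }
 */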
10478
10479
10480/**
10481 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10482 */
10483HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10484{
10485 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10486
10487 uint32_t fMsrpm;
10488 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10489 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10490 else
10491 fMsrpm = VMXMSRPM_EXIT_RD;
10492
10493 if (fMsrpm & VMXMSRPM_EXIT_RD)
10494 {
10495 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10496 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10497 }
10498 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10499}
10500
10501
10502/**
10503 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10504 */
10505HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10506{
10507 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10508
10509 uint32_t fMsrpm;
10510 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10511 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10512 else
10513 fMsrpm = VMXMSRPM_EXIT_WR;
10514
10515 if (fMsrpm & VMXMSRPM_EXIT_WR)
10516 {
10517 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10518 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10519 }
10520 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10521}
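
/*
 * Both the RDMSR and WRMSR handlers above lean on CPUMGetVmxMsrPermission to consult
 * the nested hypervisor's 4KB MSR bitmap.  Architecturally that bitmap is made up of
 * four 1KB regions: read-low (MSRs 0x00000000..0x00001fff), read-high
 * (0xc0000000..0xc0001fff), write-low and write-high, in that order, and MSRs outside
 * those ranges always cause a VM-exit.  A simplified, purely illustrative lookup for
 * the read side (the real helper returns VMXMSRPM_* flags covering both directions):
 *
 *   static bool vmxSketchDoesMsrReadExit(uint8_t const *pbMsrBitmap, uint32_t idMsr)
 *   {
 *       uint32_t offRegion;
 *       if (idMsr <= UINT32_C(0x00001fff))
 *           offRegion = 0;                          // Read-low region.
 *       else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
 *           offRegion = 0x400;                      // Read-high region.
 *       else
 *           return true;                            // Out-of-range MSRs always exit.
 *       uint32_t const iBit = idMsr & 0x1fff;
 *       return RT_BOOL(pbMsrBitmap[offRegion + (iBit >> 3)] & RT_BIT_32(iBit & 7));
 *   }
 */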
10522
10523
10524/**
10525 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10526 */
10527HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10528{
10529 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10530
10531 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10532 {
10533 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10534 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10535 }
10536 return vmxHCExitMwait(pVCpu, pVmxTransient);
10537}
10538
10539
10540/**
10541 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10542 * VM-exit.
10543 */
10544HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10545{
10546 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10547
10548 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10549 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10550 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10551 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10552}
10553
10554
10555/**
10556 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10557 */
10558HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10559{
10560 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10561
10562 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10563 {
10564 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10565 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10566 }
10567 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10568}
10569
10570
10571/**
10572 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10573 */
10574HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10575{
10576 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10577
10578 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10579 * PAUSE when executing a nested-guest? If it does not, we would not need
10580 * to check for the intercepts here. Just call VM-exit... */
10581
10582 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10583 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10584 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10585 {
10586 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10587 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10588 }
10589 return vmxHCExitPause(pVCpu, pVmxTransient);
10590}
10591
10592
10593/**
10594 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10595 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10596 */
10597HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10598{
10599 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10600
10601 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10602 {
10603 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10604 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10605 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10606 }
10607 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10608}
10609
10610
10611/**
10612 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10613 * VM-exit.
10614 */
10615HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10616{
10617 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10618
10619 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10620 | HMVMX_READ_EXIT_INSTR_LEN
10621 | HMVMX_READ_IDT_VECTORING_INFO
10622 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10623
10624 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10625
10626 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10627 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10628
10629 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10630 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10631 pVmxTransient->uIdtVectoringErrorCode);
10632 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10633}
10634
10635
10636/**
10637 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10638 * Conditional VM-exit.
10639 */
10640HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10641{
10642 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10643
10644 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10645 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10646 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10647}
10648
10649
10650/**
10651 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10652 * Conditional VM-exit.
10653 */
10654HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10655{
10656 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10657
10658 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10659 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10660 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10661}
10662
10663
10664/**
10665 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10666 */
10667HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10668{
10669 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10670
10671 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10672 {
10673 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10674 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10675 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10676 }
10677 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10678}
10679
10680
10681/**
10682 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10683 */
10684HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10685{
10686 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10687
10688 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10689 {
10690 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10691 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10692 }
10693 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10694}
10695
10696
10697/**
10698 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10699 */
10700HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10701{
10702 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10703
10704 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10705 {
10706 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10707 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10708 | HMVMX_READ_EXIT_INSTR_INFO
10709 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10710 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10711 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10712 }
10713 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10714}
10715
10716
10717/**
10718 * Nested-guest VM-exit handler for invalid-guest state
10719 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10720 */
10721HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10722{
10723 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10724
10725 /*
10726 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10727      * So if it does happen, it possibly indicates a bug in the hardware-assisted VMX code.
10728      * Handle it as if the outer guest were in an invalid guest state.
10729 *
10730 * When the fast path is implemented, this should be changed to cause the corresponding
10731 * nested-guest VM-exit.
10732 */
10733 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10734}
10735
10736
10737/**
10738 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10739 * and only provide the instruction length.
10740 *
10741 * Unconditional VM-exit.
10742 */
10743HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10744{
10745 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10746
10747#ifdef VBOX_STRICT
10748 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10749 switch (pVmxTransient->uExitReason)
10750 {
10751 case VMX_EXIT_ENCLS:
10752 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10753 break;
10754
10755 case VMX_EXIT_VMFUNC:
10756 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10757 break;
10758 }
10759#endif
10760
10761 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10762 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10763}
10764
10765
10766/**
10767 * Nested-guest VM-exit handler for instructions that provide instruction length as
10768 * well as more information.
10769 *
10770 * Unconditional VM-exit.
10771 */
10772HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10773{
10774 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10775
10776# ifdef VBOX_STRICT
10777 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10778 switch (pVmxTransient->uExitReason)
10779 {
10780 case VMX_EXIT_GDTR_IDTR_ACCESS:
10781 case VMX_EXIT_LDTR_TR_ACCESS:
10782 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10783 break;
10784
10785 case VMX_EXIT_RDRAND:
10786 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10787 break;
10788
10789 case VMX_EXIT_RDSEED:
10790 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10791 break;
10792
10793 case VMX_EXIT_XSAVES:
10794 case VMX_EXIT_XRSTORS:
10795 /** @todo NSTVMX: Verify XSS-bitmap. */
10796 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10797 break;
10798
10799 case VMX_EXIT_UMWAIT:
10800 case VMX_EXIT_TPAUSE:
10801 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10802 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10803 break;
10804
10805 case VMX_EXIT_LOADIWKEY:
10806 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10807 break;
10808 }
10809# endif
10810
10811 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10812 | HMVMX_READ_EXIT_INSTR_LEN
10813 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10814 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10815 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10816}
10817
10818# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10819
10820/**
10821 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10822 * Conditional VM-exit.
10823 */
10824HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10825{
10826 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10827 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10828
10829 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10830 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10831 {
10832 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10833 | HMVMX_READ_EXIT_INSTR_LEN
10834 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10835 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10836 | HMVMX_READ_IDT_VECTORING_INFO
10837 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10838 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10839 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10840 AssertRCReturn(rc, rc);
10841
10842 /*
10843          * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10844          * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10845          * it's that VMM's problem to deal with and we'll clear the recovered event.
10846 */
10847 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10848 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10849 { /*likely*/ }
10850 else
10851 {
10852 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10853 return rcStrict;
10854 }
10855 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10856
10857 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10858 uint64_t const uExitQual = pVmxTransient->uExitQual;
10859
10860 RTGCPTR GCPtrNestedFault;
10861 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10862 if (fIsLinearAddrValid)
10863 {
10864 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10865 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10866 }
10867 else
10868 GCPtrNestedFault = 0;
10869
10870 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10871 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10872 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10873 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10874 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10875
10876 PGMPTWALK Walk;
10877 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10878 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10879 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10880 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10881 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10882 if (RT_SUCCESS(rcStrict))
10883 return rcStrict;
10884
10885 if (fClearEventOnForward)
10886 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10887
10888 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10889 pVmxTransient->uIdtVectoringErrorCode);
10890 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10891 {
10892 VMXVEXITINFO const ExitInfo
10893 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10894 pVmxTransient->uExitQual,
10895 pVmxTransient->cbExitInstr,
10896 pVmxTransient->uGuestLinearAddr,
10897 pVmxTransient->uGuestPhysicalAddr);
10898 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10899 }
10900
10901 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10902 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10903 }
10904
10905 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10906}
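
/*
 * To illustrate the exit-qualification to error-code translation above: a nested-guest
 * write that hits an EPT entry which is readable but not writable has both
 * VMX_EXIT_QUAL_EPT_ACCESS_WRITE and VMX_EXIT_QUAL_EPT_ENTRY_READ set, producing
 * uErr = X86_TRAP_PF_RW | X86_TRAP_PF_P for PGM, while an instruction fetch from a
 * guest-physical address with no EPT mapping at all produces just X86_TRAP_PF_ID.
 */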
10907
10908
10909/**
10910 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10911 * Conditional VM-exit.
10912 */
10913HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10914{
10915 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10916 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10917
10918 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10919 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10920 {
10921 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10922 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10923 AssertRCReturn(rc, rc);
10924
10925 PGMPTWALK Walk;
10926 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10927 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10928 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10929 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10930 0 /* GCPtrNestedFault */, &Walk);
10931 if (RT_SUCCESS(rcStrict))
10932 {
10933 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10934 return rcStrict;
10935 }
10936
10937 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10938 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10939 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10940
10941 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10942 pVmxTransient->uIdtVectoringErrorCode);
10943 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10944 }
10945
10946 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10947}
10948
10949# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10950
10951/** @} */
10952#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10953
10954
10955/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10956 * probes.
10957 *
10958  * The following few functions and associated structure contain the bloat
10959  * necessary for providing detailed debug events and dtrace probes as well as
10960  * reliable host-side single stepping. This works on the principle of
10961 * "subclassing" the normal execution loop and workers. We replace the loop
10962 * method completely and override selected helpers to add necessary adjustments
10963 * to their core operation.
10964 *
10965 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10966 * any performance for debug and analysis features.
10967 *
10968 * @{
10969 */
10970
10971/**
10972  * Transient per-VCPU debug state of VMCS and related info that we save/restore in
10973 * the debug run loop.
10974 */
10975typedef struct VMXRUNDBGSTATE
10976{
10977 /** The RIP we started executing at. This is for detecting that we stepped. */
10978 uint64_t uRipStart;
10979 /** The CS we started executing with. */
10980 uint16_t uCsStart;
10981
10982 /** Whether we've actually modified the 1st execution control field. */
10983 bool fModifiedProcCtls : 1;
10984 /** Whether we've actually modified the 2nd execution control field. */
10985 bool fModifiedProcCtls2 : 1;
10986 /** Whether we've actually modified the exception bitmap. */
10987 bool fModifiedXcptBitmap : 1;
10988
10989     /** We desire the CR0 mask to be cleared. */
10990 bool fClearCr0Mask : 1;
10991     /** We desire the CR4 mask to be cleared. */
10992 bool fClearCr4Mask : 1;
10993 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10994 uint32_t fCpe1Extra;
10995 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10996 uint32_t fCpe1Unwanted;
10997 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10998 uint32_t fCpe2Extra;
10999 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11000 uint32_t bmXcptExtra;
11001 /** The sequence number of the Dtrace provider settings the state was
11002 * configured against. */
11003 uint32_t uDtraceSettingsSeqNo;
11004 /** VM-exits to check (one bit per VM-exit). */
11005 uint32_t bmExitsToCheck[3];
11006
11007 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11008 uint32_t fProcCtlsInitial;
11009 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11010 uint32_t fProcCtls2Initial;
11011 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11012 uint32_t bmXcptInitial;
11013} VMXRUNDBGSTATE;
11014AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11015typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
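
/*
 * Roughly how the debug run loop further down ties these pieces together (simplified;
 * the actual loop adds the full sequencing, force-flag handling and error paths):
 *
 *   VMXRUNDBGSTATE DbgState;
 *   vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *   vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *   for (;;)
 *   {
 *       vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);   // right before VM-entry
 *       // ... execute the guest, handle the VM-exit, and re-run
 *       //     vmxHCPreRunGuestDebugStateUpdate if the DTrace settings sequence number changed ...
 *   }
 *   rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */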
11016
11017
11018/**
11019 * Initializes the VMXRUNDBGSTATE structure.
11020 *
11021 * @param pVCpu The cross context virtual CPU structure of the
11022 * calling EMT.
11023 * @param pVmxTransient The VMX-transient structure.
11024 * @param pDbgState The debug state to initialize.
11025 */
11026static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11027{
11028 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11029 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11030
11031 pDbgState->fModifiedProcCtls = false;
11032 pDbgState->fModifiedProcCtls2 = false;
11033 pDbgState->fModifiedXcptBitmap = false;
11034 pDbgState->fClearCr0Mask = false;
11035 pDbgState->fClearCr4Mask = false;
11036 pDbgState->fCpe1Extra = 0;
11037 pDbgState->fCpe1Unwanted = 0;
11038 pDbgState->fCpe2Extra = 0;
11039 pDbgState->bmXcptExtra = 0;
11040 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11041 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11042 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11043}
11044
11045
11046/**
11047  * Updates the VMCS fields with changes requested by @a pDbgState.
11048 *
11049  * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11050 * immediately before executing guest code, i.e. when interrupts are disabled.
11051 * We don't check status codes here as we cannot easily assert or return in the
11052 * latter case.
11053 *
11054 * @param pVCpu The cross context virtual CPU structure.
11055 * @param pVmxTransient The VMX-transient structure.
11056 * @param pDbgState The debug state.
11057 */
11058static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11059{
11060 /*
11061 * Ensure desired flags in VMCS control fields are set.
11062 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11063 *
11064 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11065 * there should be no stale data in pCtx at this point.
11066 */
11067 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11068 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11069 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11070 {
11071 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11072 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11073 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11074 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11075 pDbgState->fModifiedProcCtls = true;
11076 }
11077
11078 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11079 {
11080 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11081 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11082 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11083 pDbgState->fModifiedProcCtls2 = true;
11084 }
11085
11086 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11087 {
11088 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11089 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11090 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11091 pDbgState->fModifiedXcptBitmap = true;
11092 }
11093
11094 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11095 {
11096 pVmcsInfo->u64Cr0Mask = 0;
11097 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11098 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11099 }
11100
11101 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11102 {
11103 pVmcsInfo->u64Cr4Mask = 0;
11104 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11105 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11106 }
11107
11108 NOREF(pVCpu);
11109}
11110
11111
11112/**
11113  * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11114 * re-entry next time around.
11115 *
11116 * @returns Strict VBox status code (i.e. informational status codes too).
11117 * @param pVCpu The cross context virtual CPU structure.
11118 * @param pVmxTransient The VMX-transient structure.
11119 * @param pDbgState The debug state.
11120 * @param rcStrict The return code from executing the guest using single
11121 * stepping.
11122 */
11123static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11124 VBOXSTRICTRC rcStrict)
11125{
11126 /*
11127 * Restore VM-exit control settings as we may not reenter this function the
11128 * next time around.
11129 */
11130 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11131
11132     /* We reload the initial value and trigger whatever recalculations we can the
11133 next time around. From the looks of things, that's all that's required atm. */
11134 if (pDbgState->fModifiedProcCtls)
11135 {
11136 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11137 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11138 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11139 AssertRC(rc2);
11140 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11141 }
11142
11143 /* We're currently the only ones messing with this one, so just restore the
11144 cached value and reload the field. */
11145 if ( pDbgState->fModifiedProcCtls2
11146 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11147 {
11148 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11149 AssertRC(rc2);
11150 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11151 }
11152
11153 /* If we've modified the exception bitmap, we restore it and trigger
11154 reloading and partial recalculation the next time around. */
11155 if (pDbgState->fModifiedXcptBitmap)
11156 {
11157 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11158 AssertRC(rc2);
11159 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11160 }
11161
11162 return rcStrict;
11163}
11164
11165
11166/**
11167 * Configures VM-exit controls for current DBGF and DTrace settings.
11168 *
11169 * This updates @a pDbgState and the VMCS execution control fields to reflect
11170 * the necessary VM-exits demanded by DBGF and DTrace.
11171 *
11172 * @param pVCpu The cross context virtual CPU structure.
11173 * @param pVmxTransient The VMX-transient structure. May update
11174 * fUpdatedTscOffsettingAndPreemptTimer.
11175 * @param pDbgState The debug state.
11176 */
11177static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11178{
11179#ifndef IN_NEM_DARWIN
11180 /*
11181 * Take down the dtrace serial number so we can spot changes.
11182 */
11183 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11184 ASMCompilerBarrier();
11185#endif
11186
11187 /*
11188 * We'll rebuild most of the middle block of data members (holding the
11189 * current settings) as we go along here, so start by clearing it all.
11190 */
11191 pDbgState->bmXcptExtra = 0;
11192 pDbgState->fCpe1Extra = 0;
11193 pDbgState->fCpe1Unwanted = 0;
11194 pDbgState->fCpe2Extra = 0;
11195 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11196 pDbgState->bmExitsToCheck[i] = 0;
11197
11198 /*
11199 * Software interrupts (INT XXh) - no idea how to trigger these...
11200 */
11201 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11202 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11203 || VBOXVMM_INT_SOFTWARE_ENABLED())
11204 {
11205 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11206 }
11207
11208 /*
11209 * INT3 breakpoints - triggered by #BP exceptions.
11210 */
11211 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11212 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11213
11214 /*
11215 * Exception bitmap and XCPT events+probes.
11216 */
11217 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11218 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11219 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11220
11221 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11222 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11223 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11224 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11225 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11226 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11227 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11228 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11229 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11230 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11231 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11232 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11233 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11234 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11235 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11236 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11237 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11238 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11239
11240 if (pDbgState->bmXcptExtra)
11241 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11242
11243 /*
11244 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11245 *
11246 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
11247 * So, when adding/changing/removing events, please don't forget to update it too.
11248 *
11249 * Some of the macros pick up local variables to save horizontal space
11250 * (being able to see it all in a table is the lesser evil here).
11251 */
11252#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11253 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11254 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11255#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11256 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11257 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11258 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11259 } else do { } while (0)
11260#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11261 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11262 { \
11263 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11264 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11265 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11266 } else do { } while (0)
11267#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11268 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11269 { \
11270 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11271 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11272 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11273 } else do { } while (0)
11274#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11275 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11276 { \
11277 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11278 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11279 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11280 } else do { } while (0)
11281
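    /*
     * For illustration, SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
     * from the table below expands roughly to (AssertCompile omitted):
     *
     *   if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
     *       || VBOXVMM_INSTR_HALT_ENABLED())
     *   {
     *       pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;      /* make sure HLT causes a VM-exit */
     *       ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);   /* and that we inspect that exit  */
     *   } else do { } while (0);
     */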
11282 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11283 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11284 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11285 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11286 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11287
11288 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11289 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11290 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11292 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11293 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11294 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11296 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11297 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11298 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11299 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11300 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11301 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11302 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11304 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11305 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11306 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11308 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11310 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11312 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11314 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11315 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11316 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11317 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11318 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11319 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11320 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11321 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11322 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11323 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11324
11325 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11326 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11327 {
11328 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11329 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11330 AssertRC(rc);
11331
11332#if 0 /** @todo fix me */
11333 pDbgState->fClearCr0Mask = true;
11334 pDbgState->fClearCr4Mask = true;
11335#endif
11336 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11337 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11338 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11339 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11340 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11341 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11342 require clearing here and in the loop if we start using it. */
11343 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11344 }
11345 else
11346 {
11347 if (pDbgState->fClearCr0Mask)
11348 {
11349 pDbgState->fClearCr0Mask = false;
11350 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11351 }
11352 if (pDbgState->fClearCr4Mask)
11353 {
11354 pDbgState->fClearCr4Mask = false;
11355 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11356 }
11357 }
11358 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11359 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11360
11361 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11362 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11363 {
11364 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11365 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11366 }
11367 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11368 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11369
11370 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11371 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11372 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11373 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11374 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11375 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11376 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11377 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11378#if 0 /** @todo too slow, fix handler. */
11379 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11380#endif
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11382
11383 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11384 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11385 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11386 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11387 {
11388 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11389 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11390 }
11391 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11392 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11393 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11394 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11395
11396 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11397 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11398 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11399 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11400 {
11401 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11402 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11403 }
11404 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11405 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11406 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11407 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11408
11409 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11410 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11411 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11412 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11413 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11414 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11415 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11416 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11417 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11418 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11419 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11420 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11421 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11422 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11423 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11424 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11425 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11426 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11427 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11428 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11429 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11430 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11431
11432#undef IS_EITHER_ENABLED
11433#undef SET_ONLY_XBM_IF_EITHER_EN
11434#undef SET_CPE1_XBM_IF_EITHER_EN
11435#undef SET_CPEU_XBM_IF_EITHER_EN
11436#undef SET_CPE2_XBM_IF_EITHER_EN
11437
11438 /*
11439 * Sanitize the control stuff.
11440 */
11441 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11442 if (pDbgState->fCpe2Extra)
11443 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11444 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11445 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
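    /*
     * Rough reading of the masking above, assuming the usual VMX capability MSR
     * layout (allowed1 = controls the CPU permits to be set, allowed0 = controls
     * that must remain set): the fCpe1Extra/fCpe2Extra requests are trimmed down
     * to what the CPU can actually enable, while fCpe1Unwanted is trimmed down to
     * what we are actually permitted to clear.
     */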
11446#ifndef IN_NEM_DARWIN
11447 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11448 {
11449 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11450 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11451 }
11452#else
11453 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11454 {
11455 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11456 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11457 }
11458#endif
11459
11460 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11461 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11462 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11463 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11464}
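
/*
 * A minimal sketch of how the state computed above is presumably consumed
 * later (the apply/consume code is not part of this excerpt): the extra
 * processor-based controls get OR'ed into the VMCS controls, the unwanted
 * ones masked out, and bmExitsToCheck is tested for each VM-exit, e.g.:
 *
 *   uProcCtls  |= pDbgState->fCpe1Extra;        // uProcCtls/uProcCtls2 are
 *   uProcCtls  &= ~pDbgState->fCpe1Unwanted;    // placeholder names, not
 *   uProcCtls2 |= pDbgState->fCpe2Extra;        // fields from this file.
 *   ...
 *   if (ASMBitTest(pDbgState->bmExitsToCheck, uExitReason))
 *       vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
 */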
11465
11466
11467/**
11468 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11469 * appropriate.
11470 *
11471 * The caller has checked the VM-exit against the
11472 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11473 * already, so we don't have to do that either.
11474 *
11475 * @returns Strict VBox status code (i.e. informational status codes too).
11476 * @param pVCpu The cross context virtual CPU structure.
11477 * @param pVmxTransient The VMX-transient structure.
11478 * @param uExitReason The VM-exit reason.
11479 *
11480 * @remarks The name of this function is displayed by dtrace, so keep it short
11481 * and to the point. No longer than 33 chars, please.
11482 */
11483static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11484{
11485 /*
11486 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11487 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11488 *
11489 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11490 * does. Additions/changes/removals must be made in both places, in the same order.
11491 *
11492 * Added/removed events must also be reflected in the next section
11493 * where we dispatch dtrace events.
11494 */
11495 bool fDtrace1 = false;
11496 bool fDtrace2 = false;
11497 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11498 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11499 uint32_t uEventArg = 0;
11500#define SET_EXIT(a_EventSubName) \
11501 do { \
11502 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11503 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11504 } while (0)
11505#define SET_BOTH(a_EventSubName) \
11506 do { \
11507 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11508 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11509 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11510 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11511 } while (0)
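    /*
     * For illustration, SET_BOTH(CPUID) in the switch below expands roughly to:
     *
     *   do {
     *       enmEvent1 = DBGFEVENT_INSTR_CPUID;
     *       enmEvent2 = DBGFEVENT_EXIT_CPUID;
     *       fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
     *       fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
     *   } while (0)
     *
     * i.e. both the instruction-level and the exit-level flavours of the event are considered.
     */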
11512 switch (uExitReason)
11513 {
11514 case VMX_EXIT_MTF:
11515 return vmxHCExitMtf(pVCpu, pVmxTransient);
11516
11517 case VMX_EXIT_XCPT_OR_NMI:
11518 {
11519 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11520 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11521 {
11522 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11523 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11524 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11525 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11526 {
11527 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11528 {
11529 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11530 uEventArg = pVmxTransient->uExitIntErrorCode;
11531 }
11532 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11533 switch (enmEvent1)
11534 {
11535 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11536 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11537 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11538 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11539 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11540 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11541 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11542 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11543 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11544 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11545 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11546 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11547 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11548 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11549 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11550 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11551 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11552 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11553 default: break;
11554 }
11555 }
11556 else
11557 AssertFailed();
11558 break;
11559
11560 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11561 uEventArg = idxVector;
11562 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11563 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11564 break;
11565 }
11566 break;
11567 }
11568
11569 case VMX_EXIT_TRIPLE_FAULT:
11570 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11571 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11572 break;
11573 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11574 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11575 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11576 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11577 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11578
11579 /* Instruction specific VM-exits: */
11580 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11581 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11582 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11583 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11584 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11585 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11586 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11587 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11588 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11589 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11590 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11591 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11592 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11593 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11594 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11595 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11596 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11597 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11598 case VMX_EXIT_MOV_CRX:
11599 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11600 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11601 SET_BOTH(CRX_READ);
11602 else
11603 SET_BOTH(CRX_WRITE);
11604 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11605 break;
11606 case VMX_EXIT_MOV_DRX:
11607 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11608 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11609 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11610 SET_BOTH(DRX_READ);
11611 else
11612 SET_BOTH(DRX_WRITE);
11613 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11614 break;
11615 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11616 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11617 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11618 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11619 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11620 case VMX_EXIT_GDTR_IDTR_ACCESS:
11621 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11622 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11623 {
11624 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11625 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11626 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11627 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11628 }
11629 break;
11630
11631 case VMX_EXIT_LDTR_TR_ACCESS:
11632 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11633 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11634 {
11635 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11636 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11637 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11638 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11639 }
11640 break;
11641
11642 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11643 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11644 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11645 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11646 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11647 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11648 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11649 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11650 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11651 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11652 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11653
11654 /* Events that aren't relevant at this point. */
11655 case VMX_EXIT_EXT_INT:
11656 case VMX_EXIT_INT_WINDOW:
11657 case VMX_EXIT_NMI_WINDOW:
11658 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11659 case VMX_EXIT_PREEMPT_TIMER:
11660 case VMX_EXIT_IO_INSTR:
11661 break;
11662
11663 /* Errors and unexpected events. */
11664 case VMX_EXIT_INIT_SIGNAL:
11665 case VMX_EXIT_SIPI:
11666 case VMX_EXIT_IO_SMI:
11667 case VMX_EXIT_SMI:
11668 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11669 case VMX_EXIT_ERR_MSR_LOAD:
11670 case VMX_EXIT_ERR_MACHINE_CHECK:
11671 case VMX_EXIT_PML_FULL:
11672 case VMX_EXIT_VIRTUALIZED_EOI:
11673 break;
11674
11675 default:
11676 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11677 break;
11678 }
11679#undef SET_BOTH
11680#undef SET_EXIT
11681
11682 /*
11683 * Dtrace tracepoints go first. We do them all here at once so we don't
11684 * have to repeat the guest-state saving and related code a few dozen times.
11685 * The downside is that we've got to repeat the switch, though this time
11686 * we use enmEvent since the probes are a subset of what DBGF does.
11687 */
11688 if (fDtrace1 || fDtrace2)
11689 {
11690 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11691 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11692 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11693 switch (enmEvent1)
11694 {
11695 /** @todo consider which extra parameters would be helpful for each probe. */
11696 case DBGFEVENT_END: break;
11697 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11698 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11699 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11700 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11701 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11702 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11703 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11704 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11705 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11706 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11707 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11708 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11709 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11710 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11711 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11712 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11713 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11714 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11715 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11716 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11717 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11718 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11719 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11720 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11721 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11722 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11723 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11724 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11725 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11726 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11727 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11728 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11729 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11730 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11731 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11732 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11733 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11734 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11735 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11736 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11737 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11738 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11739 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11740 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11741 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11742 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11743 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11744 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11745 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11746 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11747 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11748 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11749 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11750 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11751 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11752 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11753 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11754 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11755 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11756 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11757 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11758 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11759 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11760 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11761 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11762 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11763 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11764 }
11765 switch (enmEvent2)
11766 {
11767 /** @todo consider which extra parameters would be helpful for each probe. */
11768 case DBGFEVENT_END: break;
11769 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11770 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11771 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11772 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11773 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11774 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11775 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11776 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11777 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11778 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11779 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11780 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11781 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11782 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11783 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11784 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11785 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11786 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11787 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11788 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11789 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11790 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11791 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11792 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11793 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11794 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11795 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11796 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11797 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11798 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11799 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11800 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11801 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11802 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11803 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11804 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11805 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11806 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11807 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11808 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11809 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11810 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11811 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11812 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11813 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11814 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11815 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11816 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11817 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11818 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11819 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11820 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11821 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11822 }
11823 }
11824
11825 /*
11826 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11827 * the DBGF call will do a full check).
11828 *
11829 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11830 * Note! If we have two events, we prioritize the first, i.e. the instruction
11831 * one, in order to avoid event nesting.
11832 */
11833 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11834 if ( enmEvent1 != DBGFEVENT_END
11835 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11836 {
11837 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11838 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11839 if (rcStrict != VINF_SUCCESS)
11840 return rcStrict;
11841 }
11842 else if ( enmEvent2 != DBGFEVENT_END
11843 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11844 {
11845 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11846 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11847 if (rcStrict != VINF_SUCCESS)
11848 return rcStrict;
11849 }
11850
11851 return VINF_SUCCESS;
11852}
11853
11854
11855/**
11856 * Single-stepping VM-exit filtering.
11857 *
11858 * This is preprocessing the VM-exits and deciding whether we've gotten far
11859 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11860 * handling is performed.
11861 *
11862 * @returns Strict VBox status code (i.e. informational status codes too).
11863 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11864 * @param pVmxTransient The VMX-transient structure.
11865 * @param pDbgState The debug state.
11866 */
11867DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11868{
11869 /*
11870 * Expensive (saves context) generic dtrace VM-exit probe.
11871 */
11872 uint32_t const uExitReason = pVmxTransient->uExitReason;
11873 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11874 { /* more likely */ }
11875 else
11876 {
11877 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11878 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11879 AssertRC(rc);
11880 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11881 }
11882
11883#ifndef IN_NEM_DARWIN
11884 /*
11885 * Check for host NMI, just to get that out of the way.
11886 */
11887 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11888 { /* normally likely */ }
11889 else
11890 {
11891 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11892 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11893 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11894 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11895 }
11896#endif
11897
11898 /*
11899 * Check for single stepping event if we're stepping.
11900 */
11901 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11902 {
11903 switch (uExitReason)
11904 {
11905 case VMX_EXIT_MTF:
11906 return vmxHCExitMtf(pVCpu, pVmxTransient);
11907
11908 /* Various events: */
11909 case VMX_EXIT_XCPT_OR_NMI:
11910 case VMX_EXIT_EXT_INT:
11911 case VMX_EXIT_TRIPLE_FAULT:
11912 case VMX_EXIT_INT_WINDOW:
11913 case VMX_EXIT_NMI_WINDOW:
11914 case VMX_EXIT_TASK_SWITCH:
11915 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11916 case VMX_EXIT_APIC_ACCESS:
11917 case VMX_EXIT_EPT_VIOLATION:
11918 case VMX_EXIT_EPT_MISCONFIG:
11919 case VMX_EXIT_PREEMPT_TIMER:
11920
11921 /* Instruction specific VM-exits: */
11922 case VMX_EXIT_CPUID:
11923 case VMX_EXIT_GETSEC:
11924 case VMX_EXIT_HLT:
11925 case VMX_EXIT_INVD:
11926 case VMX_EXIT_INVLPG:
11927 case VMX_EXIT_RDPMC:
11928 case VMX_EXIT_RDTSC:
11929 case VMX_EXIT_RSM:
11930 case VMX_EXIT_VMCALL:
11931 case VMX_EXIT_VMCLEAR:
11932 case VMX_EXIT_VMLAUNCH:
11933 case VMX_EXIT_VMPTRLD:
11934 case VMX_EXIT_VMPTRST:
11935 case VMX_EXIT_VMREAD:
11936 case VMX_EXIT_VMRESUME:
11937 case VMX_EXIT_VMWRITE:
11938 case VMX_EXIT_VMXOFF:
11939 case VMX_EXIT_VMXON:
11940 case VMX_EXIT_MOV_CRX:
11941 case VMX_EXIT_MOV_DRX:
11942 case VMX_EXIT_IO_INSTR:
11943 case VMX_EXIT_RDMSR:
11944 case VMX_EXIT_WRMSR:
11945 case VMX_EXIT_MWAIT:
11946 case VMX_EXIT_MONITOR:
11947 case VMX_EXIT_PAUSE:
11948 case VMX_EXIT_GDTR_IDTR_ACCESS:
11949 case VMX_EXIT_LDTR_TR_ACCESS:
11950 case VMX_EXIT_INVEPT:
11951 case VMX_EXIT_RDTSCP:
11952 case VMX_EXIT_INVVPID:
11953 case VMX_EXIT_WBINVD:
11954 case VMX_EXIT_XSETBV:
11955 case VMX_EXIT_RDRAND:
11956 case VMX_EXIT_INVPCID:
11957 case VMX_EXIT_VMFUNC:
11958 case VMX_EXIT_RDSEED:
11959 case VMX_EXIT_XSAVES:
11960 case VMX_EXIT_XRSTORS:
11961 {
11962 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11963 AssertRCReturn(rc, rc);
11964 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11965 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11966 return VINF_EM_DBG_STEPPED;
11967 break;
11968 }
11969
11970 /* Errors and unexpected events: */
11971 case VMX_EXIT_INIT_SIGNAL:
11972 case VMX_EXIT_SIPI:
11973 case VMX_EXIT_IO_SMI:
11974 case VMX_EXIT_SMI:
11975 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11976 case VMX_EXIT_ERR_MSR_LOAD:
11977 case VMX_EXIT_ERR_MACHINE_CHECK:
11978 case VMX_EXIT_PML_FULL:
11979 case VMX_EXIT_VIRTUALIZED_EOI:
11980 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11981 break;
11982
11983 default:
11984 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11985 break;
11986 }
11987 }
11988
11989 /*
11990 * Check for debugger event breakpoints and dtrace probes.
11991 */
11992 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11993 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11994 {
11995 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11996 if (rcStrict != VINF_SUCCESS)
11997 return rcStrict;
11998 }
11999
12000 /*
12001 * Normal processing.
12002 */
12003#ifdef HMVMX_USE_FUNCTION_TABLE
12004 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12005#else
12006 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12007#endif
12008}
12009
12010/** @} */