VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97054

Last change on this file was r97054, checked in by vboxsync, 2 years ago:

VMM/HMVMXR0: Only use the template functions for reading into VMXTRANSIENT.

1/* $Id: VMXAllTemplate.cpp.h 97054 2022-10-07 23:09:55Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
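/* Illustrative usage (a minimal sketch, assuming the HMVMX_READ_XXX flags are the same
   read-tracking bits used with fVmcsFieldsRead elsewhere in this file): a strict-build exit
   handler that consumes the exit qualification could assert it was read first, e.g.
       HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
   before touching pVmxTransient->uExitQual. */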
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always swapped
69 * and restored across the world-switch, and also registers, like the EFER
70 * MSR, which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
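/* Illustrative usage (a minimal sketch using CPUMCTX_EXTRN_XXX bits already referenced in this
   file): a helper that reads guest CR0 and RFLAGS from the context could demand that both have
   already been imported from the VMCS, e.g.
       HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS); */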
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
470 /* 32-bit read-only fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields. */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
569
570#ifdef VBOX_STRICT
571static const uint32_t g_aVmcsSegBase[] =
572{
573 VMX_VMCS_GUEST_ES_BASE,
574 VMX_VMCS_GUEST_CS_BASE,
575 VMX_VMCS_GUEST_SS_BASE,
576 VMX_VMCS_GUEST_DS_BASE,
577 VMX_VMCS_GUEST_FS_BASE,
578 VMX_VMCS_GUEST_GS_BASE
579};
580static const uint32_t g_aVmcsSegSel[] =
581{
582 VMX_VMCS16_GUEST_ES_SEL,
583 VMX_VMCS16_GUEST_CS_SEL,
584 VMX_VMCS16_GUEST_SS_SEL,
585 VMX_VMCS16_GUEST_DS_SEL,
586 VMX_VMCS16_GUEST_FS_SEL,
587 VMX_VMCS16_GUEST_GS_SEL
588};
589static const uint32_t g_aVmcsSegLimit[] =
590{
591 VMX_VMCS32_GUEST_ES_LIMIT,
592 VMX_VMCS32_GUEST_CS_LIMIT,
593 VMX_VMCS32_GUEST_SS_LIMIT,
594 VMX_VMCS32_GUEST_DS_LIMIT,
595 VMX_VMCS32_GUEST_FS_LIMIT,
596 VMX_VMCS32_GUEST_GS_LIMIT
597};
598static const uint32_t g_aVmcsSegAttr[] =
599{
600 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
601 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
604 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
605 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
606};
607AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
608AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
609AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
610AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
611#endif /* VBOX_STRICT */
612
613#ifdef HMVMX_USE_FUNCTION_TABLE
614/**
615 * VMX_EXIT dispatch table.
616 */
617static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
618{
619 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
620 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
621 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
622 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
623 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
624 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
625 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
626 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
627 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
628 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
629 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
630 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
631 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
632 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
633 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
634 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
635 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
636 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
637 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
639 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
640 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
641 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
642 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
643 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
644 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
645 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
646 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
647 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
648#else
649 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
650 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
651 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
652 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
653 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
654 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
655 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
656 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
657 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
658#endif
659 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
660 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
661 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
662 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
663 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
664 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
665 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
666 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
667 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
668 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
669 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
670 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
671 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
672 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
673 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
674 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
675 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
676 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
677 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
678 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
679 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
680 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
681#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
683#else
684 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
685#endif
686 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
687 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
688#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
689 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
690#else
691 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
692#endif
693 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
694 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
695 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
696 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
697 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
698 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
699 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
700 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
701 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
702 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
703 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
704 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
705 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
706 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
707 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
708 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
709};
710#endif /* HMVMX_USE_FUNCTION_TABLE */
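/* Illustrative dispatch sketch (hedged; uExitReason is assumed to be the basic exit reason
   field of the VMX-transient structure): with HMVMX_USE_FUNCTION_TABLE, the common exit path
   would look roughly like
       Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
       VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
   i.e. a bounds-checked indexed call through the table above. */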
711
712#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
713static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
714{
715 /* 0 */ "(Not Used)",
716 /* 1 */ "VMCALL executed in VMX root operation.",
717 /* 2 */ "VMCLEAR with invalid physical address.",
718 /* 3 */ "VMCLEAR with VMXON pointer.",
719 /* 4 */ "VMLAUNCH with non-clear VMCS.",
720 /* 5 */ "VMRESUME with non-launched VMCS.",
721 /* 6 */ "VMRESUME after VMXOFF",
722 /* 7 */ "VM-entry with invalid control fields.",
723 /* 8 */ "VM-entry with invalid host state fields.",
724 /* 9 */ "VMPTRLD with invalid physical address.",
725 /* 10 */ "VMPTRLD with VMXON pointer.",
726 /* 11 */ "VMPTRLD with incorrect revision identifier.",
727 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
728 /* 13 */ "VMWRITE to read-only VMCS component.",
729 /* 14 */ "(Not Used)",
730 /* 15 */ "VMXON executed in VMX root operation.",
731 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
732 /* 17 */ "VM-entry with non-launched executing VMCS.",
733 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
734 /* 19 */ "VMCALL with non-clear VMCS.",
735 /* 20 */ "VMCALL with invalid VM-exit control fields.",
736 /* 21 */ "(Not Used)",
737 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
738 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
739 /* 24 */ "VMCALL with invalid SMM-monitor features.",
740 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
741 /* 26 */ "VM-entry with events blocked by MOV SS.",
742 /* 27 */ "(Not Used)",
743 /* 28 */ "Invalid operand to INVEPT/INVVPID."
744};
745#endif /* VBOX_STRICT && LOG_ENABLED */
746
747
748/**
749 * Gets the CR0 guest/host mask.
750 *
751 * These bits typically do not change through the lifetime of a VM. Any bit set in
752 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
753 * by the guest.
754 *
755 * @returns The CR0 guest/host mask.
756 * @param pVCpu The cross context virtual CPU structure.
757 */
758static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
759{
760 /*
761 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
762 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
763 *
764 * Furthermore, modifications to any bits that are reserved/unspecified currently
765 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
766 * when future CPUs specify and use currently reserved/unspecified bits.
767 */
768 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
769 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
770 * and @bugref{6944}. */
771 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
772 return ( X86_CR0_PE
773 | X86_CR0_NE
774 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
775 | X86_CR0_PG
776 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
777}
778
779
780/**
781 * Gets the CR4 guest/host mask.
782 *
783 * These bits typically do not change through the lifetime of a VM. Any bit set in
784 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
785 * by the guest.
786 *
787 * @returns The CR4 guest/host mask.
788 * @param pVCpu The cross context virtual CPU structure.
789 */
790static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
791{
792 /*
793 * We construct a mask of all CR4 bits that the guest can modify without causing
794 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
795 * a VM-exit when the guest attempts to modify them when executing using
796 * hardware-assisted VMX.
797 *
798 * When a feature is not exposed to the guest (and may be present on the host),
799 * we want to intercept guest modifications to the bit so we can emulate proper
800 * behavior (e.g., #GP).
801 *
802 * Furthermore, only modifications to those bits that don't require immediate
803 * emulation are allowed. E.g., PCIDE is excluded because the behavior
804 * depends on CR3 which might not always be the guest value while executing
805 * using hardware-assisted VMX.
806 */
807 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
808 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
809#ifdef IN_NEM_DARWIN
810 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
811#endif
812 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
813
814 /*
815 * Paranoia.
816 * Ensure features exposed to the guest are present on the host.
817 */
818 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
819#ifdef IN_NEM_DARWIN
820 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
821#endif
822 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
823
824 uint64_t const fGstMask = X86_CR4_PVI
825 | X86_CR4_TSD
826 | X86_CR4_DE
827 | X86_CR4_MCE
828 | X86_CR4_PCE
829 | X86_CR4_OSXMMEEXCPT
830 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
831#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
832 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
833 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
834#endif
835 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
836 return ~fGstMask;
837}
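/* Illustrative usage (a minimal sketch; VMX_VMCS_WRITE_NW is assumed to be the natural-width
   counterpart of the VMX_VMCS_WRITE_16/32/64 wrappers required at the top of this file):
       uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
       int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
       AssertRC(rc);
   i.e. the returned complement of the guest-modifiable bits becomes the CR4 guest/host mask. */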
838
839
840/**
841 * Adds one or more exceptions to the exception bitmap and commits it to the current
842 * VMCS.
843 *
844 * @param pVCpu The cross context virtual CPU structure.
845 * @param pVmxTransient The VMX-transient structure.
846 * @param uXcptMask The exception(s) to add.
847 */
848static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
849{
850 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
851 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
852 if ((uXcptBitmap & uXcptMask) != uXcptMask)
853 {
854 uXcptBitmap |= uXcptMask;
855 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
856 AssertRC(rc);
857 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
858 }
859}
860
861
862/**
863 * Adds an exception to the exception bitmap and commits it to the current VMCS.
864 *
865 * @param pVCpu The cross context virtual CPU structure.
866 * @param pVmxTransient The VMX-transient structure.
867 * @param uXcpt The exception to add.
868 */
869static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
870{
871 Assert(uXcpt <= X86_XCPT_LAST);
872 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
873}
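/* Illustrative usage (a minimal sketch): forcing #GP to be intercepted for the current VMCS
   amounts to
       vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
   while vmxHCRemoveXcptIntercept() below drops the intercept again when it is safe to do so. */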
874
875
876/**
877 * Removes one or more exceptions from the exception bitmap and commits it to the
878 * current VMCS.
879 *
880 * This takes care of not removing the exception intercept if a nested-guest
881 * requires the exception to be intercepted.
882 *
883 * @returns VBox status code.
884 * @param pVCpu The cross context virtual CPU structure.
885 * @param pVmxTransient The VMX-transient structure.
886 * @param uXcptMask The exception(s) to remove.
887 */
888static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
889{
890 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
891 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
892 if (u32XcptBitmap & uXcptMask)
893 {
894#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
895 if (!pVmxTransient->fIsNestedGuest)
896 { /* likely */ }
897 else
898 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
899#endif
900#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
901 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
902 | RT_BIT(X86_XCPT_DE)
903 | RT_BIT(X86_XCPT_NM)
904 | RT_BIT(X86_XCPT_TS)
905 | RT_BIT(X86_XCPT_UD)
906 | RT_BIT(X86_XCPT_NP)
907 | RT_BIT(X86_XCPT_SS)
908 | RT_BIT(X86_XCPT_GP)
909 | RT_BIT(X86_XCPT_PF)
910 | RT_BIT(X86_XCPT_MF));
911#elif defined(HMVMX_ALWAYS_TRAP_PF)
912 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
913#endif
914 if (uXcptMask)
915 {
916 /* Validate we are not removing any essential exception intercepts. */
917#ifndef IN_NEM_DARWIN
918 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
919#else
920 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
921#endif
922 NOREF(pVCpu);
923 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
924 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
925
926 /* Remove it from the exception bitmap. */
927 u32XcptBitmap &= ~uXcptMask;
928
929 /* Commit and update the cache if necessary. */
930 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
931 {
932 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
933 AssertRC(rc);
934 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
935 }
936 }
937 }
938 return VINF_SUCCESS;
939}
940
941
942/**
943 * Removes an exception from the exception bitmap and commits it to the current
944 * VMCS.
945 *
946 * @returns VBox status code.
947 * @param pVCpu The cross context virtual CPU structure.
948 * @param pVmxTransient The VMX-transient structure.
949 * @param uXcpt The exception to remove.
950 */
951static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
952{
953 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
954}
955
956#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
957
958/**
959 * Loads the shadow VMCS specified by the VMCS info. object.
960 *
961 * @returns VBox status code.
962 * @param pVmcsInfo The VMCS info. object.
963 *
964 * @remarks Can be called with interrupts disabled.
965 */
966static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
967{
968 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
969 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
970
971 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
972 if (RT_SUCCESS(rc))
973 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
974 return rc;
975}
976
977
978/**
979 * Clears the shadow VMCS specified by the VMCS info. object.
980 *
981 * @returns VBox status code.
982 * @param pVmcsInfo The VMCS info. object.
983 *
984 * @remarks Can be called with interrupts disabled.
985 */
986static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
987{
988 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
989 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
990
991 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
992 if (RT_SUCCESS(rc))
993 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
994 return rc;
995}
996
997
998/**
999 * Switches from and to the specified VMCSes.
1000 *
1001 * @returns VBox status code.
1002 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1003 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1004 *
1005 * @remarks Called with interrupts disabled.
1006 */
1007static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1008{
1009 /*
1010 * Clear the VMCS we are switching out if it has not already been cleared.
1011 * This will sync any CPU internal data back to the VMCS.
1012 */
1013 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1014 {
1015 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1016 if (RT_SUCCESS(rc))
1017 {
1018 /*
1019 * The shadow VMCS, if any, would not be active at this point since we
1020 * would have cleared it while importing the virtual hardware-virtualization
1021 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1022 * clear the shadow VMCS here, just assert for safety.
1023 */
1024 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1025 }
1026 else
1027 return rc;
1028 }
1029
1030 /*
1031 * Clear the VMCS we are switching to if it has not already been cleared.
1032 * This will initialize the VMCS launch state to "clear" required for loading it.
1033 *
1034 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1035 */
1036 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1037 {
1038 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1039 if (RT_SUCCESS(rc))
1040 { /* likely */ }
1041 else
1042 return rc;
1043 }
1044
1045 /*
1046 * Finally, load the VMCS we are switching to.
1047 */
1048 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1049}
1050
1051
1052/**
1053 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1054 * caller.
1055 *
1056 * @returns VBox status code.
1057 * @param pVCpu The cross context virtual CPU structure.
1058 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1059 * true) or guest VMCS (pass false).
1060 */
1061static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1062{
1063 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1064 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1065
1066 PVMXVMCSINFO pVmcsInfoFrom;
1067 PVMXVMCSINFO pVmcsInfoTo;
1068 if (fSwitchToNstGstVmcs)
1069 {
1070 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1071 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1072 }
1073 else
1074 {
1075 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1076 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1077 }
1078
1079 /*
1080 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1081 * preemption hook code path acquires the current VMCS.
1082 */
1083 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1084
1085 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1086 if (RT_SUCCESS(rc))
1087 {
1088 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1089 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1090
1091 /*
1092 * If we are switching to a VMCS that was executed on a different host CPU or was
1093 * never executed before, flag that we need to export the host state before executing
1094 * guest/nested-guest code using hardware-assisted VMX.
1095 *
1096 * This could probably be done in a preemptible context since the preemption hook
1097 * will flag the necessary change in host context. However, since preemption is
1098 * already disabled and to avoid making assumptions about host specific code in
1099 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1100 * disabled.
1101 */
1102 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1103 { /* likely */ }
1104 else
1105 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1106
1107 ASMSetFlags(fEFlags);
1108
1109 /*
1110 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1111 * flag that we need to update the host MSR values there. Even if we decide in the
1112 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1113 * if its content differs, we would have to update the host MSRs anyway.
1114 */
1115 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1116 }
1117 else
1118 ASMSetFlags(fEFlags);
1119 return rc;
1120}
1121
1122#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1123#ifdef VBOX_STRICT
1124
1125/**
1126 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1127 * transient structure.
1128 *
1129 * @param pVCpu The cross context virtual CPU structure.
1130 * @param pVmxTransient The VMX-transient structure.
1131 */
1132DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1133{
1134 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1135 AssertRC(rc);
1136}
1137
1138
1139/**
1140 * Reads the VM-entry exception error code field from the VMCS into
1141 * the VMX transient structure.
1142 *
1143 * @param pVCpu The cross context virtual CPU structure.
1144 * @param pVmxTransient The VMX-transient structure.
1145 */
1146DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1147{
1148 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1149 AssertRC(rc);
1150}
1151
1152
1153/**
1154 * Reads the VM-entry instruction length field from the VMCS into
1155 * the VMX transient structure.
1156 *
1157 * @param pVCpu The cross context virtual CPU structure.
1158 * @param pVmxTransient The VMX-transient structure.
1159 */
1160DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1161{
1162 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1163 AssertRC(rc);
1164}
1165
1166#endif /* VBOX_STRICT */
1167
1168/**
1169 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1170 * transient structure.
1171 *
1172 * @param pVCpu The cross context virtual CPU structure.
1173 * @param pVmxTransient The VMX-transient structure.
1174 */
1175DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1176{
1177 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1178 {
1179 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1180 AssertRC(rc);
1181 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1182 }
1183}
1184
1185
1186/**
1187 * Reads the VM-exit interruption error code from the VMCS into the VMX
1188 * transient structure.
1189 *
1190 * @param pVCpu The cross context virtual CPU structure.
1191 * @param pVmxTransient The VMX-transient structure.
1192 */
1193DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1194{
1195 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1196 {
1197 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1198 AssertRC(rc);
1199 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1200 }
1201}
1202
1203
1204/**
1205 * Reads the VM-exit instruction length field from the VMCS into the VMX
1206 * transient structure.
1207 *
1208 * @param pVCpu The cross context virtual CPU structure.
1209 * @param pVmxTransient The VMX-transient structure.
1210 */
1211DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1212{
1213 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1214 {
1215 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1216 AssertRC(rc);
1217 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1218 }
1219}
1220
1221
1222/**
1223 * Reads the VM-exit instruction-information field from the VMCS into
1224 * the VMX transient structure.
1225 *
1226 * @param pVCpu The cross context virtual CPU structure.
1227 * @param pVmxTransient The VMX-transient structure.
1228 */
1229DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1230{
1231 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1232 {
1233 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1234 AssertRC(rc);
1235 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1236 }
1237}
1238
1239
1240/**
1241 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1242 *
1243 * @param pVCpu The cross context virtual CPU structure.
1244 * @param pVmxTransient The VMX-transient structure.
1245 */
1246DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1247{
1248 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1249 {
1250 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1251 AssertRC(rc);
1252 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1253 }
1254}
1255
1256
1257/**
1258 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1259 *
1260 * @param pVCpu The cross context virtual CPU structure.
1261 * @param pVmxTransient The VMX-transient structure.
1262 */
1263DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1264{
1265 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1266 {
1267 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1268 AssertRC(rc);
1269 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1270 }
1271}
1272
1273
1274/**
1275 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1276 *
1277 * @param pVCpu The cross context virtual CPU structure.
1278 * @param pVmxTransient The VMX-transient structure.
1279 */
1280DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1281{
1282 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1283 {
1284 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1285 AssertRC(rc);
1286 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1287 }
1288}
1289
1290#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1291/**
1292 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1293 * structure.
1294 *
1295 * @param pVCpu The cross context virtual CPU structure.
1296 * @param pVmxTransient The VMX-transient structure.
1297 */
1298DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1299{
1300 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1301 {
1302 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1303 AssertRC(rc);
1304 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1305 }
1306}
1307#endif
1308
1309/**
1310 * Reads the IDT-vectoring information field from the VMCS into the VMX
1311 * transient structure.
1312 *
1313 * @param pVCpu The cross context virtual CPU structure.
1314 * @param pVmxTransient The VMX-transient structure.
1315 *
1316 * @remarks No-long-jump zone!!!
1317 */
1318DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1319{
1320 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1321 {
1322 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1323 AssertRC(rc);
1324 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1325 }
1326}
1327
1328
1329/**
1330 * Reads the IDT-vectoring error code from the VMCS into the VMX
1331 * transient structure.
1332 *
1333 * @param pVCpu The cross context virtual CPU structure.
1334 * @param pVmxTransient The VMX-transient structure.
1335 */
1336DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1337{
1338 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1339 {
1340 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1341 AssertRC(rc);
1342 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1343 }
1344}
1345
1346
1347/**
1348 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1349 *
1350 * Don't call directly unless it's likely that some or all of the fields
1351 * given in @a a_fReadMask have already been read.
1352 *
1353 * @tparam a_fReadMask The fields to read.
1354 * @param pVCpu The cross context virtual CPU structure.
1355 * @param pVmxTransient The VMX-transient structure.
1356 */
1357template<uint32_t const a_fReadMask>
1358static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1359{
1360 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1361 {
1362 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1363
1364 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1365 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1366 {
1367 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1368 AssertRC(rc);
1369 }
1370 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1371 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1372 {
1373 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1374 AssertRC(rc);
1375 }
1376 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1377 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1378 {
1379 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1380 AssertRC(rc);
1381 }
1382 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1383 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1384 {
1385 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1386 AssertRC(rc);
1387 }
1388 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1389 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1390 {
1391 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1392 AssertRC(rc);
1393 }
1394 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1395 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1396 {
1397 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1398 AssertRC(rc);
1399 }
1400 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1401 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1402 {
1403 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1404 AssertRC(rc);
1405 }
1406 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1407 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1408 {
1409 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1410 AssertRC(rc);
1411 }
1412 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1413 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1414 {
1415 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1416 AssertRC(rc);
1417 }
1418
1419 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1420 }
1421}
1422
1423
1424/**
1425 * Reads VMCS fields into the VMXTRANSIENT structure.
1426 *
1427 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1428 * generating an optimized read sequence w/o any conditionals in between in
1429 * non-strict builds.
1430 *
1431 * @tparam a_fReadMask The fields to read. One or more of the
1432 * HMVMX_READ_XXX fields ORed together.
1433 * @param pVCpu The cross context virtual CPU structure.
1434 * @param pVmxTransient The VMX-transient structure.
1435 */
1436template<uint32_t const a_fReadMask>
1437DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1438{
1439 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1440 {
1441 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1442 {
1443 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1444 AssertRC(rc);
1445 }
1446 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1447 {
1448 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1449 AssertRC(rc);
1450 }
1451 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1452 {
1453 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1454 AssertRC(rc);
1455 }
1456 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1457 {
1458 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1459 AssertRC(rc);
1460 }
1461 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1462 {
1463 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1464 AssertRC(rc);
1465 }
1466 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1467 {
1468 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1469 AssertRC(rc);
1470 }
1471 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1472 {
1473 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1474 AssertRC(rc);
1475 }
1476 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1477 {
1478 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1479 AssertRC(rc);
1480 }
1481 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1482 {
1483 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1484 AssertRC(rc);
1485 }
1486
1487 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1488 }
1489 else
1490 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1491}
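/* Usage sketch (illustrative): an exit handler needing, say, the exit qualification and the
   instruction length in one go would call
       vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
   which, when none of the requested fields have been read yet, compiles down to essentially two
   unconditional VMREADs thanks to the compile-time a_fReadMask. */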
1492
1493
1494#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1495/**
1496 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1497 *
1498 * @param pVCpu The cross context virtual CPU structure.
1499 * @param pVmxTransient The VMX-transient structure.
1500 */
1501static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1502{
1503 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1504 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1505 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1506 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1507 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1508 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1509 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1510 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1511 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1512 AssertRC(rc);
1513 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1514 | HMVMX_READ_EXIT_INSTR_LEN
1515 | HMVMX_READ_EXIT_INSTR_INFO
1516 | HMVMX_READ_IDT_VECTORING_INFO
1517 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1518 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1519 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1520 | HMVMX_READ_GUEST_LINEAR_ADDR
1521 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1522}
1523#endif
1524
1525/**
1526 * Verifies that our cached values of the VMCS fields are all consistent with
1527 * what's actually present in the VMCS.
1528 *
1529 * @returns VBox status code.
1530 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1531 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1532 * VMCS content. HMCPU error-field is
1533 * updated, see VMX_VCI_XXX.
1534 * @param pVCpu The cross context virtual CPU structure.
1535 * @param pVmcsInfo The VMCS info. object.
1536 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1537 */
1538static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1539{
1540 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1541
1542 uint32_t u32Val;
1543 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1544 AssertRC(rc);
1545 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1546 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1547 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1548 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1549
1550 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1551 AssertRC(rc);
1552 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1553 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1554 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1555 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1556
1557 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1558 AssertRC(rc);
1559 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1560 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1561 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1562 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1563
1564 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1565 AssertRC(rc);
1566 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1567 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1568 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1569 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1570
1571 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1572 {
1573 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1574 AssertRC(rc);
1575 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1576 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1577 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1578 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1579 }
1580
1581 uint64_t u64Val;
1582 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1583 {
1584 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1585 AssertRC(rc);
1586 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1587 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1588 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1589 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1590 }
1591
1592 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1593 AssertRC(rc);
1594 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1595 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1596 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1597 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1598
1599 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1600 AssertRC(rc);
1601 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1602 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1604 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1605
1606 NOREF(pcszVmcs);
1607 return VINF_SUCCESS;
1608}
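/* Illustrative note: a mismatch caught above typically means some code path wrote one of these
   control fields straight into the VMCS without updating the corresponding pVmcsInfo cache member
   (or the other way around); the VMX_VCI_XXX value left in u32HMError identifies which field. */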
1609
1610
1611/**
1612 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1613 * VMCS.
1614 *
1615 * This is typically required when the guest changes paging mode.
1616 *
1617 * @returns VBox status code.
1618 * @param pVCpu The cross context virtual CPU structure.
1619 * @param pVmxTransient The VMX-transient structure.
1620 *
1621 * @remarks Requires EFER.
1622 * @remarks No-long-jump zone!!!
1623 */
1624static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1625{
1626 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1627 {
1628 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1629 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1630
1631 /*
1632 * VM-entry controls.
1633 */
1634 {
1635 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1636 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
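/* Illustrative: allowed0 holds the controls the CPU forces to 1, allowed1 the controls it permits
   to be 1.  The "(fVal & fZap) == fVal" check further down therefore only fails if we ask for a
   control the CPU cannot do, e.g. requesting VMX_ENTRY_CTLS_LOAD_EFER_MSR on hardware whose
   allowed1 mask has that bit clear. */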
1637
1638 /*
1639 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1640 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1641 *
1642 * For nested-guests, this is a mandatory VM-entry control. It's also
1643 * required because we do not want to leak host bits to the nested-guest.
1644 */
1645 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1646
1647 /*
1648 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1649 *
1650 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1651 * required to get the nested-guest working with hardware-assisted VMX execution.
1652 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1653 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1654 * here rather than while merging the guest VMCS controls.
1655 */
1656 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1657 {
1658 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1659 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1660 }
1661 else
1662 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1663
1664 /*
1665 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1666 *
1667 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1668 * regardless of whether the nested-guest VMCS specifies it because we are free to
1669 * load whatever MSRs we require and we do not need to modify the guest visible copy
1670 * of the VM-entry MSR load area.
1671 */
1672 if ( g_fHmVmxSupportsVmcsEfer
1673#ifndef IN_NEM_DARWIN
1674 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1675#endif
1676 )
1677 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1678 else
1679 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1680
1681 /*
1682 * The following should -not- be set (since we're not in SMM mode):
1683 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1684 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1685 */
1686
1687 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1688 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1689
1690 if ((fVal & fZap) == fVal)
1691 { /* likely */ }
1692 else
1693 {
1694 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1695 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1696 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1697 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1698 }
1699
1700 /* Commit it to the VMCS. */
1701 if (pVmcsInfo->u32EntryCtls != fVal)
1702 {
1703 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1704 AssertRC(rc);
1705 pVmcsInfo->u32EntryCtls = fVal;
1706 }
1707 }
1708
1709 /*
1710 * VM-exit controls.
1711 */
1712 {
1713 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1714 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1715
1716 /*
1717 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1718 * supported the 1-setting of this bit.
1719 *
1720 * For nested-guests, we set the "save debug controls" control since the converse
1721 * "load debug controls" control is mandatory for nested-guests anyway.
1722 */
1723 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1724
1725 /*
1726 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1727 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1728 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1729 * vmxHCExportHostMsrs().
1730 *
1731 * For nested-guests, we always set this bit as we do not support 32-bit
1732 * hosts.
1733 */
1734 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1735
1736#ifndef IN_NEM_DARWIN
1737 /*
1738 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1739 *
1740 * For nested-guests, we should use the "save IA32_EFER" control if we also
1741 * used the "load IA32_EFER" control while exporting VM-entry controls.
1742 */
1743 if ( g_fHmVmxSupportsVmcsEfer
1744 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1745 {
1746 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1747 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1748 }
1749#endif
1750
1751 /*
1752 * Enable saving of the VMX-preemption timer value on VM-exit.
1753 * For nested-guests, currently not exposed/used.
1754 */
1755 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1756 * the timer value. */
1757 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1758 {
1759 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1760 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1761 }
1762
1763 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1764 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1765
1766 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1767 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1768 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1769
1770 if ((fVal & fZap) == fVal)
1771 { /* likely */ }
1772 else
1773 {
1774 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1775 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1776 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1777 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1778 }
1779
1780 /* Commit it to the VMCS. */
1781 if (pVmcsInfo->u32ExitCtls != fVal)
1782 {
1783 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1784 AssertRC(rc);
1785 pVmcsInfo->u32ExitCtls = fVal;
1786 }
1787 }
1788
1789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1790 }
1791 return VINF_SUCCESS;
1792}
1793
1794
1795/**
1796 * Sets the TPR threshold in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmcsInfo The VMCS info. object.
1800 * @param u32TprThreshold The TPR threshold (task-priority class only).
1801 */
1802DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1803{
1804 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1805 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1806 RT_NOREF(pVmcsInfo);
1807 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1808 AssertRC(rc);
1809}
1810
1811
1812/**
1813 * Exports the guest APIC TPR state into the VMCS.
1814 *
1815 * @param pVCpu The cross context virtual CPU structure.
1816 * @param pVmxTransient The VMX-transient structure.
1817 *
1818 * @remarks No-long-jump zone!!!
1819 */
1820static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1821{
1822 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1823 {
1824 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1825
1826 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1827 if (!pVmxTransient->fIsNestedGuest)
1828 {
1829 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1830 && APICIsEnabled(pVCpu))
1831 {
1832 /*
1833 * Setup TPR shadowing.
1834 */
1835 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1836 {
1837 bool fPendingIntr = false;
1838 uint8_t u8Tpr = 0;
1839 uint8_t u8PendingIntr = 0;
1840 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1841 AssertRC(rc);
1842
1843 /*
1844 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1845 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1846 * priority of the pending interrupt so we can deliver the interrupt. If there
1847 * are no interrupts pending, set threshold to 0 to not cause any
1848 * TPR-below-threshold VM-exits.
1849 */
1850 uint32_t u32TprThreshold = 0;
1851 if (fPendingIntr)
1852 {
1853 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1854 (which is the Task-Priority Class). */
1855 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1856 const uint8_t u8TprPriority = u8Tpr >> 4;
1857 if (u8PendingPriority <= u8TprPriority)
1858 u32TprThreshold = u8PendingPriority;
1859 }
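/* Worked example (illustrative): a pending vector of 0x51 gives priority class 5 and a guest TPR
   of 0x60 gives class 6; since 5 <= 6 the interrupt is masked, so we program threshold 5 and get a
   TPR-below-threshold VM-exit as soon as the guest drops its TPR class below 5, at which point the
   pending interrupt can be delivered. */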
1860
1861 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1862 }
1863 }
1864 }
1865 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1867 }
1868}
1869
1870
1871/**
1872 * Gets the guest interruptibility-state and updates related force-flags.
1873 *
1874 * @returns Guest's interruptibility-state.
1875 * @param pVCpu The cross context virtual CPU structure.
1876 *
1877 * @remarks No-long-jump zone!!!
1878 */
1879static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1880{
1881 /*
1882 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1883 */
1884 uint32_t fIntrState = 0;
1885 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1886 {
1887 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1888 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1889
1890 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1891 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1892 {
1893 if (pCtx->eflags.Bits.u1IF)
1894 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1895 else
1896 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1897 }
1898 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1899 {
1900 /*
1901 * We can clear the inhibit force flag as even if we go back to the recompiler
1902 * without executing guest code in VT-x, the flag's condition to be cleared is
1903 * met and thus the cleared state is correct.
1904 */
1905 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1906 }
1907 }
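/* Example (illustrative): the guest executed STI while IF was clear; the recorded inhibit PC is
   the instruction following STI, so as long as RIP still equals it we report block-by-STI above
   and VT-x holds off interrupt injection for exactly that one instruction. */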
1908
1909 /*
1910 * Check if we should inhibit NMI delivery.
1911 */
1912 if (CPUMIsGuestNmiBlocking(pVCpu))
1913 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1914
1915 /*
1916 * Validate.
1917 */
1918#ifdef VBOX_STRICT
1919 /* We don't support block-by-SMI yet.*/
1920 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1921
1922 /* Block-by-STI must not be set when interrupts are disabled. */
1923 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1924 {
1925 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1926 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1927 }
1928#endif
1929
1930 return fIntrState;
1931}
1932
1933
1934/**
1935 * Exports the exception intercepts required for guest execution in the VMCS.
1936 *
1937 * @param pVCpu The cross context virtual CPU structure.
1938 * @param pVmxTransient The VMX-transient structure.
1939 *
1940 * @remarks No-long-jump zone!!!
1941 */
1942static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1943{
1944 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1945 {
1946 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1947 if ( !pVmxTransient->fIsNestedGuest
1948 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1949 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1950 else
1951 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1952
1953 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1954 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1955 }
1956}
1957
1958
1959/**
1960 * Exports the guest's RIP into the guest-state area in the VMCS.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 *
1964 * @remarks No-long-jump zone!!!
1965 */
1966static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1967{
1968 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1969 {
1970 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1971
1972 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1973 AssertRC(rc);
1974
1975 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1976 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1977 }
1978}
1979
1980
1981/**
1982 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1983 *
1984 * @param pVCpu The cross context virtual CPU structure.
1985 * @param pVmxTransient The VMX-transient structure.
1986 *
1987 * @remarks No-long-jump zone!!!
1988 */
1989static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1990{
1991 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1992 {
1993 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1994
1995 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1996 Let us assert it as such and use 32-bit VMWRITE. */
1997 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1998 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1999 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
2000 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
2001
2002#ifndef IN_NEM_DARWIN
2003 /*
2004 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
2005 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
2006 * can run the real-mode guest code under Virtual 8086 mode.
2007 */
2008 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
2009 if (pVmcsInfo->RealMode.fRealOnV86Active)
2010 {
2011 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2012 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2013 Assert(!pVmxTransient->fIsNestedGuest);
2014 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
2015 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2016 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2017 }
2018#else
2019 RT_NOREF(pVmxTransient);
2020#endif
2021
2022 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
2023 AssertRC(rc);
2024
2025 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
2026 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
2027 }
2028}
2029
2030
2031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2032/**
2033 * Copies the nested-guest VMCS to the shadow VMCS.
2034 *
2035 * @returns VBox status code.
2036 * @param pVCpu The cross context virtual CPU structure.
2037 * @param pVmcsInfo The VMCS info. object.
2038 *
2039 * @remarks No-long-jump zone!!!
2040 */
2041static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2042{
2043 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
2044 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2045
2046 /*
2047 * Disable interrupts so we don't get preempted while the shadow VMCS is the
2048 * current VMCS, as we may try saving guest lazy MSRs.
2049 *
2050 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
2051 * calling the import VMCS code which is currently performing the guest MSR reads
2052 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
2053 * and the rest of the VMX leave session machinery.
2054 */
2055 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2056
2057 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
2058 if (RT_SUCCESS(rc))
2059 {
2060 /*
2061 * Copy all guest read/write VMCS fields.
2062 *
2063 * We don't check for VMWRITE failures here for performance reasons and
2064 * because they are not expected to fail, barring irrecoverable conditions
2065 * like hardware errors.
2066 */
2067 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
2068 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
2069 {
2070 uint64_t u64Val;
2071 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
2072 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
2073 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
2074 }
2075
2076 /*
2077 * If the host CPU supports writing all VMCS fields, copy the guest read-only
2078 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
2079 */
2080 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
2081 {
2082 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
2083 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
2084 {
2085 uint64_t u64Val;
2086 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
2087 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
2088 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
2089 }
2090 }
2091
2092 rc = vmxHCClearShadowVmcs(pVmcsInfo);
2093 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
2094 }
2095
2096 ASMSetFlags(fEFlags);
2097 return rc;
2098}
2099
2100
2101/**
2102 * Copies the shadow VMCS to the nested-guest VMCS.
2103 *
2104 * @returns VBox status code.
2105 * @param pVCpu The cross context virtual CPU structure.
2106 * @param pVmcsInfo The VMCS info. object.
2107 *
2108 * @remarks Called with interrupts disabled.
2109 */
2110static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2111{
2112 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2113 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
2114 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2115
2116 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
2117 if (RT_SUCCESS(rc))
2118 {
2119 /*
2120 * Copy guest read/write fields from the shadow VMCS.
2121 * Guest read-only fields cannot be modified, so no need to copy them.
2122 *
2123 * We don't check for VMREAD failures here for performance reasons and
2124 * because they are not expected to fail, barring irrecoverable conditions
2125 * like hardware errors.
2126 */
2127 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
2128 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
2129 {
2130 uint64_t u64Val;
2131 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
2132 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
2133 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
2134 }
2135
2136 rc = vmxHCClearShadowVmcs(pVmcsInfo);
2137 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
2138 }
2139 return rc;
2140}
2141
2142
2143/**
2144 * Enables VMCS shadowing for the given VMCS info. object.
2145 *
2146 * @param pVCpu The cross context virtual CPU structure.
2147 * @param pVmcsInfo The VMCS info. object.
2148 *
2149 * @remarks No-long-jump zone!!!
2150 */
2151static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2152{
2153 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2154 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2155 {
2156 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2157 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2158 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2159 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2160 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2161 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2162 Log4Func(("Enabled\n"));
2163 }
2164}
2165
2166
2167/**
2168 * Disables VMCS shadowing for the given VMCS info. object.
2169 *
2170 * @param pVCpu The cross context virtual CPU structure.
2171 * @param pVmcsInfo The VMCS info. object.
2172 *
2173 * @remarks No-long-jump zone!!!
2174 */
2175static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2176{
2177 /*
2178 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2179 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2180 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2181 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2182 *
2183 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2184 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2185 */
2186 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2187 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2188 {
2189 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2190 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2191 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2192 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2193 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2194 Log4Func(("Disabled\n"));
2195 }
2196}
2197#endif
2198
2199
2200/**
2201 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2202 *
2203 * The guest FPU state is always pre-loaded hence we don't need to bother about
2204 * sharing FPU related CR0 bits between the guest and host.
2205 *
2206 * @returns VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure.
2208 * @param pVmxTransient The VMX-transient structure.
2209 *
2210 * @remarks No-long-jump zone!!!
2211 */
2212static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2213{
2214 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2215 {
2216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2217 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2218
2219 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2220 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2221 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2222 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2223 else
2224 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
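/* Illustrative: on typical hardware CR0_FIXED0 is 0x80000021 (PE, NE and PG forced to 1) and
   CR0_FIXED1 is 0xffffffff (no bit forced to 0), so with unrestricted guest execution only NE
   remains forced on by the fixed-bit handling here. */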
2225
2226 if (!pVmxTransient->fIsNestedGuest)
2227 {
2228 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2229 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2230 uint64_t const u64ShadowCr0 = u64GuestCr0;
2231 Assert(!RT_HI_U32(u64GuestCr0));
2232
2233 /*
2234 * Setup VT-x's view of the guest CR0.
2235 */
2236 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2237 if (VM_IS_VMX_NESTED_PAGING(pVM))
2238 {
2239#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2240 if (CPUMIsGuestPagingEnabled(pVCpu))
2241 {
2242 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2243 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2244 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2245 }
2246 else
2247 {
2248 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2249 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2250 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2251 }
2252
2253 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2254 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2255 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2256#endif
2257 }
2258 else
2259 {
2260 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
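/* (Shadow-paging note, illustrative: forcing WP makes supervisor-mode guest writes still honour
    the read-only bits PGM sets in the shadow PTEs, e.g. for write-monitored guest page tables.) */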
2261 u64GuestCr0 |= X86_CR0_WP;
2262 }
2263
2264 /*
2265 * Guest FPU bits.
2266 *
2267 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2268 * using CR0.TS.
2269 *
2270 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2271 * set on the first CPUs to support VT-x, with no mention of relaxing this for unrestricted guest (UX) execution in the VM-entry checks.
2272 */
2273 u64GuestCr0 |= X86_CR0_NE;
2274
2275 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2276 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2277
2278 /*
2279 * Update exception intercepts.
2280 */
2281 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2282#ifndef IN_NEM_DARWIN
2283 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2284 {
2285 Assert(PDMVmmDevHeapIsEnabled(pVM));
2286 Assert(pVM->hm.s.vmx.pRealModeTSS);
2287 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2288 }
2289 else
2290#endif
2291 {
2292 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2293 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2294 if (fInterceptMF)
2295 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2296 }
2297
2298 /* Additional intercepts for debugging, define these yourself explicitly. */
2299#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2300 uXcptBitmap |= 0
2301 | RT_BIT(X86_XCPT_BP)
2302 | RT_BIT(X86_XCPT_DE)
2303 | RT_BIT(X86_XCPT_NM)
2304 | RT_BIT(X86_XCPT_TS)
2305 | RT_BIT(X86_XCPT_UD)
2306 | RT_BIT(X86_XCPT_NP)
2307 | RT_BIT(X86_XCPT_SS)
2308 | RT_BIT(X86_XCPT_GP)
2309 | RT_BIT(X86_XCPT_PF)
2310 | RT_BIT(X86_XCPT_MF)
2311 ;
2312#elif defined(HMVMX_ALWAYS_TRAP_PF)
2313 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2314#endif
2315 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2316 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2317 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2318 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2319 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2320
2321 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2322 u64GuestCr0 |= fSetCr0;
2323 u64GuestCr0 &= fZapCr0;
2324 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2325
2326 /* Commit the CR0 and related fields to the guest VMCS. */
2327 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2328 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2329 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2330 {
2331 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2332 AssertRC(rc);
2333 }
2334 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2335 {
2336 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2337 AssertRC(rc);
2338 }
2339
2340 /* Update our caches. */
2341 pVmcsInfo->u32ProcCtls = uProcCtls;
2342 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2343
2344 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2345 }
2346 else
2347 {
2348 /*
2349 * With nested-guests, we may have extended the guest/host mask here since we
2350 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2351 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2352 * originally supplied. We must copy those bits from the nested-guest CR0 into
2353 * the nested-guest CR0 read-shadow.
2354 */
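/* Example (illustrative): if the nested hypervisor only owns CR0.NE but our merged guest/host mask
   also owns CR0.TS, the read shadow must carry the nested-guest's own TS value so that a CR0 read
   inside the nested-guest still returns what that guest expects. */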
2355 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2356 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2357 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2358 Assert(!RT_HI_U32(u64GuestCr0));
2359 Assert(u64GuestCr0 & X86_CR0_NE);
2360
2361 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2362 u64GuestCr0 |= fSetCr0;
2363 u64GuestCr0 &= fZapCr0;
2364 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2365
2366 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2367 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2368 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2369
2370 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2371 }
2372
2373 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2374 }
2375
2376 return VINF_SUCCESS;
2377}
2378
2379
2380/**
2381 * Exports the guest control registers (CR3, CR4) into the guest-state area
2382 * in the VMCS.
2383 *
2384 * @returns VBox strict status code.
2385 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2386 * without unrestricted guest access and the VMMDev is not presently
2387 * mapped (e.g. EFI32).
2388 *
2389 * @param pVCpu The cross context virtual CPU structure.
2390 * @param pVmxTransient The VMX-transient structure.
2391 *
2392 * @remarks No-long-jump zone!!!
2393 */
2394static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2395{
2396 int rc = VINF_SUCCESS;
2397 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2398
2399 /*
2400 * Guest CR2.
2401 * It's always loaded in the assembler code. Nothing to do here.
2402 */
2403
2404 /*
2405 * Guest CR3.
2406 */
2407 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2408 {
2409 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2410
2411 if (VM_IS_VMX_NESTED_PAGING(pVM))
2412 {
2413#ifndef IN_NEM_DARWIN
2414 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2415 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2416
2417 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2418 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2419 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2420 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2421
2422 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2423 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2424 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
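/* Illustrative EPTP layout: bits 2:0 = memory type (6 = WB), bits 5:3 = page-walk length minus one
   (3 = 4-level), bit 6 = A/D enable, upper bits = physical address of the EPT PML4 table.
   E.g. a PML4 at 0x12345000 with WB and 4-level walks yields an EPTP of 0x1234501e. */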
2425
2426 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2427 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2428 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2429 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2430 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2431 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2432 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2433
2434 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2435 AssertRC(rc);
2436#endif
2437
2438 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2439 uint64_t u64GuestCr3 = pCtx->cr3;
2440 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2441 || CPUMIsGuestPagingEnabledEx(pCtx))
2442 {
2443 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2444 if (CPUMIsGuestInPAEModeEx(pCtx))
2445 {
2446 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2447 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2448 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2449 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2450 }
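/* (Illustrative note: with EPT the CPU does not fetch the PDPTEs from guest memory on VM-entry but
    takes them from these guest-state fields, which is why they must be kept in sync here.) */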
2451
2452 /*
2453 * The guest's view of its CR3 is unblemished with nested paging when the
2454 * guest is using paging or we have unrestricted guest execution to handle
2455 * the guest when it's not using paging.
2456 */
2457 }
2458#ifndef IN_NEM_DARWIN
2459 else
2460 {
2461 /*
2462 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2463 * thinks it accesses physical memory directly, we use our identity-mapped
2464 * page table to map guest-linear to guest-physical addresses. EPT takes care
2465 * of translating it to host-physical addresses.
2466 */
2467 RTGCPHYS GCPhys;
2468 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2469
2470 /* We obtain it here every time as the guest could have relocated this PCI region. */
2471 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2472 if (RT_SUCCESS(rc))
2473 { /* likely */ }
2474 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2475 {
2476 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2477 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2478 }
2479 else
2480 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2481
2482 u64GuestCr3 = GCPhys;
2483 }
2484#endif
2485
2486 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2487 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2488 AssertRC(rc);
2489 }
2490 else
2491 {
2492 Assert(!pVmxTransient->fIsNestedGuest);
2493 /* Non-nested paging case, just use the hypervisor's CR3. */
2494 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2495
2496 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2497 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2498 AssertRC(rc);
2499 }
2500
2501 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2502 }
2503
2504 /*
2505 * Guest CR4.
2506 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2507 */
2508 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2509 {
2510 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2511 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2512
2513 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2514 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2515
2516 /*
2517 * With nested-guests, we may have extended the guest/host mask here (since we
2518 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2519 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2520 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2521 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2522 */
2523 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2524 uint64_t u64GuestCr4 = pCtx->cr4;
2525 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2526 ? pCtx->cr4
2527 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2528 Assert(!RT_HI_U32(u64GuestCr4));
2529
2530#ifndef IN_NEM_DARWIN
2531 /*
2532 * Setup VT-x's view of the guest CR4.
2533 *
2534 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2535 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2536 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2537 *
2538 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2539 */
2540 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2541 {
2542 Assert(pVM->hm.s.vmx.pRealModeTSS);
2543 Assert(PDMVmmDevHeapIsEnabled(pVM));
2544 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2545 }
2546#endif
2547
2548 if (VM_IS_VMX_NESTED_PAGING(pVM))
2549 {
2550 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2551 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2552 {
2553 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2554 u64GuestCr4 |= X86_CR4_PSE;
2555 /* Our identity mapping is a 32-bit page directory. */
2556 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2557 }
2558 /* else use guest CR4.*/
2559 }
2560 else
2561 {
2562 Assert(!pVmxTransient->fIsNestedGuest);
2563
2564 /*
2565 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2566 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2567 */
2568 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2569 {
2570 case PGMMODE_REAL: /* Real-mode. */
2571 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2572 case PGMMODE_32_BIT: /* 32-bit paging. */
2573 {
2574 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2575 break;
2576 }
2577
2578 case PGMMODE_PAE: /* PAE paging. */
2579 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2580 {
2581 u64GuestCr4 |= X86_CR4_PAE;
2582 break;
2583 }
2584
2585 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2586 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2587 {
2588#ifdef VBOX_WITH_64_BITS_GUESTS
2589 /* For our assumption in vmxHCShouldSwapEferMsr. */
2590 Assert(u64GuestCr4 & X86_CR4_PAE);
2591 break;
2592#endif
2593 }
2594 default:
2595 AssertFailed();
2596 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2597 }
2598 }
2599
2600 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2601 u64GuestCr4 |= fSetCr4;
2602 u64GuestCr4 &= fZapCr4;
2603
2604 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2605 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2606 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2607
2608#ifndef IN_NEM_DARWIN
2609 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2610 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2611 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2612 {
2613 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2614 hmR0VmxUpdateStartVmFunction(pVCpu);
2615 }
2616#endif
2617
2618 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2619
2620 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2621 }
2622 return rc;
2623}
2624
2625
2626#ifdef VBOX_STRICT
2627/**
2628 * Strict function to validate segment registers.
2629 *
2630 * @param pVCpu The cross context virtual CPU structure.
2631 * @param pVmcsInfo The VMCS info. object.
2632 *
2633 * @remarks Will import guest CR0 on strict builds during validation of
2634 * segments.
2635 */
2636static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2637{
2638 /*
2639 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2640 *
2641 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2642 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2643 * unusable bit and doesn't change the guest-context value.
2644 */
2645 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2646 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2647 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2648 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2649 && ( !CPUMIsGuestInRealModeEx(pCtx)
2650 && !CPUMIsGuestInV86ModeEx(pCtx)))
2651 {
2652 /* Protected mode checks */
2653 /* CS */
2654 Assert(pCtx->cs.Attr.n.u1Present);
2655 Assert(!(pCtx->cs.Attr.u & 0xf00));
2656 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2657 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2658 || !(pCtx->cs.Attr.n.u1Granularity));
2659 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2660 || (pCtx->cs.Attr.n.u1Granularity));
2661 /* CS cannot be loaded with NULL in protected mode. */
2662 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2663 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2664 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2665 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2666 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2667 else
2668 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2669 /* SS */
2670 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2671 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2672 if ( !(pCtx->cr0 & X86_CR0_PE)
2673 || pCtx->cs.Attr.n.u4Type == 3)
2674 {
2675 Assert(!pCtx->ss.Attr.n.u2Dpl);
2676 }
2677 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2678 {
2679 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2680 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2681 Assert(pCtx->ss.Attr.n.u1Present);
2682 Assert(!(pCtx->ss.Attr.u & 0xf00));
2683 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2684 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2685 || !(pCtx->ss.Attr.n.u1Granularity));
2686 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2687 || (pCtx->ss.Attr.n.u1Granularity));
2688 }
2689 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2690 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2691 {
2692 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2693 Assert(pCtx->ds.Attr.n.u1Present);
2694 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2695 Assert(!(pCtx->ds.Attr.u & 0xf00));
2696 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2697 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2698 || !(pCtx->ds.Attr.n.u1Granularity));
2699 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2700 || (pCtx->ds.Attr.n.u1Granularity));
2701 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2702 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2703 }
2704 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2705 {
2706 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2707 Assert(pCtx->es.Attr.n.u1Present);
2708 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2709 Assert(!(pCtx->es.Attr.u & 0xf00));
2710 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2711 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2712 || !(pCtx->es.Attr.n.u1Granularity));
2713 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2714 || (pCtx->es.Attr.n.u1Granularity));
2715 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2716 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2717 }
2718 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2719 {
2720 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2721 Assert(pCtx->fs.Attr.n.u1Present);
2722 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2723 Assert(!(pCtx->fs.Attr.u & 0xf00));
2724 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2725 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2726 || !(pCtx->fs.Attr.n.u1Granularity));
2727 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2728 || (pCtx->fs.Attr.n.u1Granularity));
2729 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2730 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2731 }
2732 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2733 {
2734 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2735 Assert(pCtx->gs.Attr.n.u1Present);
2736 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2737 Assert(!(pCtx->gs.Attr.u & 0xf00));
2738 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2739 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2740 || !(pCtx->gs.Attr.n.u1Granularity));
2741 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2742 || (pCtx->gs.Attr.n.u1Granularity));
2743 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2744 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2745 }
2746 /* 64-bit capable CPUs. */
2747 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2748 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2749 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2750 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2751 }
2752 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2753 || ( CPUMIsGuestInRealModeEx(pCtx)
2754 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2755 {
2756 /* Real and v86 mode checks. */
2757 /* vmxHCExportGuestSegReg() writes the modified attribute values into the VMCS. We want what we're feeding to VT-x. */
2758 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2759#ifndef IN_NEM_DARWIN
2760 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2761 {
2762 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2763 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2764 }
2765 else
2766#endif
2767 {
2768 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2769 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2770 }
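/* (Illustrative decode of 0xf3: type 3 = read/write data, accessed; S=1; DPL=3; present - the
    access rights VT-x requires for every segment while in virtual-8086 mode.) */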
2771
2772 /* CS */
2773 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2774 Assert(pCtx->cs.u32Limit == 0xffff);
2775 Assert(u32CSAttr == 0xf3);
2776 /* SS */
2777 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2778 Assert(pCtx->ss.u32Limit == 0xffff);
2779 Assert(u32SSAttr == 0xf3);
2780 /* DS */
2781 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2782 Assert(pCtx->ds.u32Limit == 0xffff);
2783 Assert(u32DSAttr == 0xf3);
2784 /* ES */
2785 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2786 Assert(pCtx->es.u32Limit == 0xffff);
2787 Assert(u32ESAttr == 0xf3);
2788 /* FS */
2789 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2790 Assert(pCtx->fs.u32Limit == 0xffff);
2791 Assert(u32FSAttr == 0xf3);
2792 /* GS */
2793 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2794 Assert(pCtx->gs.u32Limit == 0xffff);
2795 Assert(u32GSAttr == 0xf3);
2796 /* 64-bit capable CPUs. */
2797 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2798 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2799 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2800 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2801 }
2802}
2803#endif /* VBOX_STRICT */
2804
2805
2806/**
2807 * Exports a guest segment register into the guest-state area in the VMCS.
2808 *
2809 * @returns VBox status code.
2810 * @param pVCpu The cross context virtual CPU structure.
2811 * @param pVmcsInfo The VMCS info. object.
2812 * @param iSegReg The segment register number (X86_SREG_XXX).
2813 * @param pSelReg Pointer to the segment selector.
2814 *
2815 * @remarks No-long-jump zone!!!
2816 */
2817static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2818{
2819 Assert(iSegReg < X86_SREG_COUNT);
2820
2821 uint32_t u32Access = pSelReg->Attr.u;
2822#ifndef IN_NEM_DARWIN
2823 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2824#endif
2825 {
2826 /*
2827 * The way to differentiate between whether this is really a null selector or was just
2828 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2829 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2830 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2831 * that NULL selectors loaded in protected-mode have their attributes set to 0.
2832 */
2833 if (u32Access)
2834 { }
2835 else
2836 u32Access = X86DESCATTR_UNUSABLE;
2837 }
2838#ifndef IN_NEM_DARWIN
2839 else
2840 {
2841 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2842 u32Access = 0xf3;
2843 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2844 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2845 RT_NOREF_PV(pVCpu);
2846 }
2847#else
2848 RT_NOREF(pVmcsInfo);
2849#endif
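/* Example (illustrative): a guest doing "xor ax, ax; mov ds, ax" in real mode ends up with
   DS.Sel=0 but non-zero attributes, which stays usable here; a null DS loaded in protected mode
   has attribute 0 and is marked unusable instead. */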
2850
2851 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2852 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2853 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2854
2855 /*
2856 * Commit it to the VMCS.
2857 */
2858 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2859 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2860 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2861 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2862 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2865 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2866 return VINF_SUCCESS;
2867}
2868
2869
2870/**
2871 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2872 * area in the VMCS.
2873 *
2874 * @returns VBox status code.
2875 * @param pVCpu The cross context virtual CPU structure.
2876 * @param pVmxTransient The VMX-transient structure.
2877 *
2878 * @remarks Will import guest CR0 on strict builds during validation of
2879 * segments.
2880 * @remarks No-long-jump zone!!!
2881 */
2882static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2883{
2884 int rc = VERR_INTERNAL_ERROR_5;
2885#ifndef IN_NEM_DARWIN
2886 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2887#endif
2888 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2889 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2890#ifndef IN_NEM_DARWIN
2891 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2892#endif
2893
2894 /*
2895 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2896 */
2897 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2898 {
2899 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2900 {
2901 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2902#ifndef IN_NEM_DARWIN
2903 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2904 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2905#endif
2906 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2907 AssertRC(rc);
2908 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2909 }
2910
2911 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2912 {
2913 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2914#ifndef IN_NEM_DARWIN
2915 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2916 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2917#endif
2918 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2919 AssertRC(rc);
2920 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2921 }
2922
2923 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2924 {
2925 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2926#ifndef IN_NEM_DARWIN
2927 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2928 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2929#endif
2930 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2931 AssertRC(rc);
2932 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2933 }
2934
2935 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2936 {
2937 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2938#ifndef IN_NEM_DARWIN
2939 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2940 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2941#endif
2942 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2943 AssertRC(rc);
2944 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2945 }
2946
2947 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2948 {
2949 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2950#ifndef IN_NEM_DARWIN
2951 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2952 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2953#endif
2954 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2955 AssertRC(rc);
2956 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2957 }
2958
2959 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2960 {
2961 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2962#ifndef IN_NEM_DARWIN
2963 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2964 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2965#endif
2966 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2967 AssertRC(rc);
2968 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2969 }
2970
2971#ifdef VBOX_STRICT
2972 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2973#endif
2974 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2975 pCtx->cs.Attr.u));
2976 }
2977
2978 /*
2979 * Guest TR.
2980 */
2981 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2982 {
2983 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2984
2985 /*
2986 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2987 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2988 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2989 */
2990 uint16_t u16Sel;
2991 uint32_t u32Limit;
2992 uint64_t u64Base;
2993 uint32_t u32AccessRights;
2994#ifndef IN_NEM_DARWIN
2995 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2996#endif
2997 {
2998 u16Sel = pCtx->tr.Sel;
2999 u32Limit = pCtx->tr.u32Limit;
3000 u64Base = pCtx->tr.u64Base;
3001 u32AccessRights = pCtx->tr.Attr.u;
3002 }
3003#ifndef IN_NEM_DARWIN
3004 else
3005 {
3006 Assert(!pVmxTransient->fIsNestedGuest);
3007 Assert(pVM->hm.s.vmx.pRealModeTSS);
3008 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
3009
3010 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3011 RTGCPHYS GCPhys;
3012 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3013 AssertRCReturn(rc, rc);
3014
3015 X86DESCATTR DescAttr;
3016 DescAttr.u = 0;
3017 DescAttr.n.u1Present = 1;
3018 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3019
3020 u16Sel = 0;
3021 u32Limit = HM_VTX_TSS_SIZE;
3022 u64Base = GCPhys;
3023 u32AccessRights = DescAttr.u;
3024 }
3025#endif
3026
3027 /* Validate. */
3028 Assert(!(u16Sel & RT_BIT(2)));
3029 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3030 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3031 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3032 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3033 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3034 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3035 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3036 Assert( (u32Limit & 0xfff) == 0xfff
3037 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3038 Assert( !(u32Limit & 0xfff00000)
3039 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3040
3041 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
3042 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
3043 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
3044 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
3045
3046 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
3047 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
3048 }
3049
3050 /*
3051 * Guest GDTR.
3052 */
3053 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
3054 {
3055 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
3056
3057 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
3058 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
3059
3060 /* Validate. */
3061 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3062
3063 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
3064 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
3065 }
3066
3067 /*
3068 * Guest LDTR.
3069 */
3070 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
3071 {
3072 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
3073
3074 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
3075 uint32_t u32Access;
3076 if ( !pVmxTransient->fIsNestedGuest
3077 && !pCtx->ldtr.Attr.u)
3078 u32Access = X86DESCATTR_UNUSABLE;
3079 else
3080 u32Access = pCtx->ldtr.Attr.u;
3081
3082 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
3083 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
3084 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
3085 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
3086
3087 /* Validate. */
3088 if (!(u32Access & X86DESCATTR_UNUSABLE))
3089 {
3090 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3091 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3092 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3093 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3094 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3095 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3096 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
3097 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3098 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
3099 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3100 }
3101
3102 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
3103 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
3104 }
3105
3106 /*
3107 * Guest IDTR.
3108 */
3109 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
3110 {
3111 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
3112
3113 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
3114 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
3115
3116 /* Validate. */
3117 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3118
3119 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
3120 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
3121 }
3122
3123 return VINF_SUCCESS;
3124}
3125
3126
3127/**
3128 * Gets the IEM exception flags for the specified vector and IDT vectoring /
3129 * VM-exit interruption info type.
3130 *
3131 * @returns The IEM exception flags.
3132 * @param uVector The event vector.
3133 * @param uVmxEventType The VMX event type.
3134 *
3135 * @remarks This function currently only constructs flags required for
3136 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
3137 * and CR2 aspects of an exception are not included).
3138 */
3139static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
3140{
3141 uint32_t fIemXcptFlags;
3142 switch (uVmxEventType)
3143 {
3144 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
3145 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
3146 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
3147 break;
3148
3149 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3150 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3151 break;
3152
3153 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3154 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3155 break;
3156
3157 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3158 {
3159 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3160 if (uVector == X86_XCPT_BP)
3161 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3162 else if (uVector == X86_XCPT_OF)
3163 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3164 else
3165 {
3166 fIemXcptFlags = 0;
3167 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3168 }
3169 break;
3170 }
3171
3172 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3173 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3174 break;
3175
3176 default:
3177 fIemXcptFlags = 0;
3178 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3179 break;
3180 }
3181 return fIemXcptFlags;
3182}
3183
3184
3185/**
3186 * Sets an event as a pending event to be injected into the guest.
3187 *
3188 * @param pVCpu The cross context virtual CPU structure.
3189 * @param u32IntInfo The VM-entry interruption-information field.
3190 * @param cbInstr The VM-entry instruction length in bytes (for
3191 * software interrupts, exceptions and privileged
3192 * software exceptions).
3193 * @param u32ErrCode The VM-entry exception error code.
3194 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3195 * page-fault.
3196 */
3197DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3198 RTGCUINTPTR GCPtrFaultAddress)
3199{
3200 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3201 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3202 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3203 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3204 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3205 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3206}
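/*
 * Note: the vmxHCSetPendingXcpt* helpers below build the 32-bit VM-entry
 * interruption-information field described in Intel spec. 24.8.3 "VM-Entry Controls for
 * Event Injection": bits 7:0 hold the vector, bits 10:8 the event type, bit 11 the
 * deliver-error-code flag and bit 31 the valid bit.
 */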
3207
3208
3209/**
3210 * Sets an external interrupt as pending-for-injection into the VM.
3211 *
3212 * @param pVCpu The cross context virtual CPU structure.
3213 * @param u8Interrupt The external interrupt vector.
3214 */
3215DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3216{
3217 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3218 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3219 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3220 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3221 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3222}
3223
3224
3225/**
3226 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3227 *
3228 * @param pVCpu The cross context virtual CPU structure.
3229 */
3230DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3231{
3232 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3233 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3234 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3235 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3236 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3237}
3238
3239
3240/**
3241 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3242 *
3243 * @param pVCpu The cross context virtual CPU structure.
3244 */
3245DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3246{
3247 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3248 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3249 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3250 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3251 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3252}
3253
3254
3255/**
3256 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3257 *
3258 * @param pVCpu The cross context virtual CPU structure.
3259 */
3260DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3261{
3262 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3263 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3264 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3265 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3266 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3267}
3268
3269
3270/**
3271 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3272 *
3273 * @param pVCpu The cross context virtual CPU structure.
3274 */
3275DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3276{
3277 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3278 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3279 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3280 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3281 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3282}
3283
3284
3285#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3286/**
3287 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3288 *
3289 * @param pVCpu The cross context virtual CPU structure.
3290 * @param u32ErrCode The error code for the general-protection exception.
3291 */
3292DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3293{
3294 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3295 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3296 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3297 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3298 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3299}
3300
3301
3302/**
3303 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3304 *
3305 * @param pVCpu The cross context virtual CPU structure.
3306 * @param u32ErrCode The error code for the stack exception.
3307 */
3308DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3309{
3310 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3311 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3312 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3313 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3314 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3315}
3316#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3317
3318
3319/**
3320 * Fixes up attributes for the specified segment register.
3321 *
3322 * @param pVCpu The cross context virtual CPU structure.
3323 * @param pSelReg The segment register that needs fixing.
3324 * @param pszRegName The register name (for logging and assertions).
3325 */
3326static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3327{
3328 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3329
3330 /*
3331 * If VT-x marks the segment as unusable, most other bits remain undefined:
3332 * - For CS the L, D and G bits have meaning.
3333 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3334 * - For the remaining data segments no bits are defined.
3335 *
3336 * The present bit and the unusable bit have been observed to be set at the
3337 * same time (the selector was supposed to be invalid as we started executing
3338 * a V8086 interrupt in ring-0).
3339 *
3340 * What should be important for the rest of the VBox code, is that the P bit is
3341 * cleared. Some of the other VBox code recognizes the unusable bit, but
3342 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3343 * safe side here, we'll strip off P and other bits we don't care about. If
3344 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3345 *
3346 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3347 */
3348#ifdef VBOX_STRICT
3349 uint32_t const uAttr = pSelReg->Attr.u;
3350#endif
3351
3352 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3353 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3354 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3355
3356#ifdef VBOX_STRICT
3357# ifndef IN_NEM_DARWIN
3358 VMMRZCallRing3Disable(pVCpu);
3359# endif
3360 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3361# ifdef DEBUG_bird
3362 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3363 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3364 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3365# endif
3366# ifndef IN_NEM_DARWIN
3367 VMMRZCallRing3Enable(pVCpu);
3368# endif
3369 NOREF(uAttr);
3370#endif
3371 RT_NOREF2(pVCpu, pszRegName);
3372}
3373
3374
3375/**
3376 * Imports a guest segment register from the current VMCS into the guest-CPU
3377 * context.
3378 *
3379 * @param pVCpu The cross context virtual CPU structure.
3380 * @param iSegReg The segment register number (X86_SREG_XXX).
3381 *
3382 * @remarks Called with interrupts and/or preemption disabled.
3383 */
3384static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3385{
3386 Assert(iSegReg < X86_SREG_COUNT);
3387 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3388 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3389 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3390 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3391
3392 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3393
3394 uint16_t u16Sel;
3395 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3396 pSelReg->Sel = u16Sel;
3397 pSelReg->ValidSel = u16Sel;
3398
3399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3400 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3401
3402 uint32_t u32Attr;
3403 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3404 pSelReg->Attr.u = u32Attr;
3405 if (u32Attr & X86DESCATTR_UNUSABLE)
3406 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3407
3408 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3409}
3410
3411
3412/**
3413 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3414 *
3415 * @param pVCpu The cross context virtual CPU structure.
3416 *
3417 * @remarks Called with interrupts and/or preemption disabled.
3418 */
3419static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3420{
3421 uint16_t u16Sel;
3422 uint64_t u64Base;
3423 uint32_t u32Limit, u32Attr;
3424 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3425 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3426 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3427 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3428
3429 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3430 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3431 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3432 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3433 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3434 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3435 if (u32Attr & X86DESCATTR_UNUSABLE)
3436 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3437}
3438
3439
3440/**
3441 * Imports the guest TR from the current VMCS into the guest-CPU context.
3442 *
3443 * @param pVCpu The cross context virtual CPU structure.
3444 *
3445 * @remarks Called with interrupts and/or preemption disabled.
3446 */
3447static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3448{
3449 uint16_t u16Sel;
3450 uint64_t u64Base;
3451 uint32_t u32Limit, u32Attr;
3452 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3453 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3454 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3455 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3456
3457 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3458 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3459 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3460 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3461 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3462 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3463 /* TR is the only selector that can never be unusable. */
3464 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3465}
3466
3467
3468/**
3469 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3470 *
3471 * @param pVCpu The cross context virtual CPU structure.
3472 *
3473 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3474 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3475 * instead!!!
3476 */
3477static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3478{
3479 uint64_t u64Val;
3480 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3481 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3482 {
3483 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3484 AssertRC(rc);
3485
3486 pCtx->rip = u64Val;
3487 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3488 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3489 }
3490}
3491
3492
3493/**
3494 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3495 *
3496 * @param pVCpu The cross context virtual CPU structure.
3497 * @param pVmcsInfo The VMCS info. object.
3498 *
3499 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3500 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3501 * instead!!!
3502 */
3503static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3504{
3505 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3506 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3507 {
3508 uint64_t u64Val;
3509 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3510 AssertRC(rc);
3511
3512 pCtx->rflags.u64 = u64Val;
3513#ifndef IN_NEM_DARWIN
3514 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3515 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3516 {
3517 pCtx->eflags.Bits.u1VM = 0;
3518 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3519 }
3520#else
3521 RT_NOREF(pVmcsInfo);
3522#endif
3523 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3524 }
3525}
3526
3527
3528/**
3529 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3530 * context.
3531 *
3532 * @param pVCpu The cross context virtual CPU structure.
3533 * @param pVmcsInfo The VMCS info. object.
3534 *
3535 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3536 * do not log!
3537 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3538 * instead!!!
3539 */
3540static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3541{
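    /* Guest interruptibility-state layout (Intel spec. 24.4.2 "Guest Non-Register State"):
       bit 0 = blocking by STI, bit 1 = blocking by MOV SS, bit 2 = blocking by SMI,
       bit 3 = blocking by NMI. Only the STI/MOV-SS and NMI bits are of interest below. */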
3542 uint32_t u32Val;
3543 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3544 if (!u32Val)
3545 {
3546 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3547 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3548 CPUMSetGuestNmiBlocking(pVCpu, false);
3549 }
3550 else
3551 {
3552 /*
3553 * We must import RIP here to set our EM interrupt-inhibited state.
3554 * We also import RFLAGS as our code that evaluates pending interrupts
3555 * before VM-entry requires it.
3556 */
3557 vmxHCImportGuestRip(pVCpu);
3558 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3559
3560 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3561 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3562 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3563 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3564
3565 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3566 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3567 }
3568}
3569
3570
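/*
 * Minimal usage sketch (illustrative only): a VM-exit handler that needs just RIP and
 * RFLAGS in the guest-CPU context would typically do:
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     AssertRCReturn(rc, rc);
 * Only the bits still marked as external in pCtx->fExtrn are actually read back from the VMCS.
 */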
3571/**
3572 * Worker for VMXR0ImportStateOnDemand.
3573 *
3574 * @returns VBox status code.
3575 * @param pVCpu The cross context virtual CPU structure.
3576 * @param pVmcsInfo The VMCS info. object.
3577 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3578 */
3579static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3580{
3581 int rc = VINF_SUCCESS;
3582 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3583 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3584 uint32_t u32Val;
3585
3586 /*
3587 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3588 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3589 * neither are other host platforms.
3590 *
3591 * Committing this temporarily as it prevents BSOD.
3592 *
3593 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3594 */
3595# ifdef RT_OS_WINDOWS
3596 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3597 return VERR_HM_IPE_1;
3598# endif
3599
3600 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3601
3602#ifndef IN_NEM_DARWIN
3603 /*
3604 * We disable interrupts to make the updating of the state and in particular
3605 * the fExtrn modification atomic with respect to preemption hooks.
3606 */
3607 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3608#endif
3609
3610 fWhat &= pCtx->fExtrn;
3611 if (fWhat)
3612 {
3613 do
3614 {
3615 if (fWhat & CPUMCTX_EXTRN_RIP)
3616 vmxHCImportGuestRip(pVCpu);
3617
3618 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3619 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3620
3621 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3622 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3623
3624 if (fWhat & CPUMCTX_EXTRN_RSP)
3625 {
3626 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3627 AssertRC(rc);
3628 }
3629
3630 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3631 {
3632 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3633#ifndef IN_NEM_DARWIN
3634 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3635#else
3636 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3637#endif
3638 if (fWhat & CPUMCTX_EXTRN_CS)
3639 {
3640 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3641 vmxHCImportGuestRip(pVCpu);
3642 if (fRealOnV86Active)
3643 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3644 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3645 }
3646 if (fWhat & CPUMCTX_EXTRN_SS)
3647 {
3648 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3649 if (fRealOnV86Active)
3650 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3651 }
3652 if (fWhat & CPUMCTX_EXTRN_DS)
3653 {
3654 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3655 if (fRealOnV86Active)
3656 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3657 }
3658 if (fWhat & CPUMCTX_EXTRN_ES)
3659 {
3660 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3661 if (fRealOnV86Active)
3662 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3663 }
3664 if (fWhat & CPUMCTX_EXTRN_FS)
3665 {
3666 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3667 if (fRealOnV86Active)
3668 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3669 }
3670 if (fWhat & CPUMCTX_EXTRN_GS)
3671 {
3672 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3673 if (fRealOnV86Active)
3674 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3675 }
3676 }
3677
3678 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3679 {
3680 if (fWhat & CPUMCTX_EXTRN_LDTR)
3681 vmxHCImportGuestLdtr(pVCpu);
3682
3683 if (fWhat & CPUMCTX_EXTRN_GDTR)
3684 {
3685 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3686 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3687 pCtx->gdtr.cbGdt = u32Val;
3688 }
3689
3690 /* Guest IDTR. */
3691 if (fWhat & CPUMCTX_EXTRN_IDTR)
3692 {
3693 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3694 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3695 pCtx->idtr.cbIdt = u32Val;
3696 }
3697
3698 /* Guest TR. */
3699 if (fWhat & CPUMCTX_EXTRN_TR)
3700 {
3701#ifndef IN_NEM_DARWIN
3702 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3703 so we don't need to import that one. */
3704 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3705#endif
3706 vmxHCImportGuestTr(pVCpu);
3707 }
3708 }
3709
3710 if (fWhat & CPUMCTX_EXTRN_DR7)
3711 {
3712#ifndef IN_NEM_DARWIN
3713 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3714#endif
3715 {
3716 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3717 AssertRC(rc);
3718 }
3719 }
3720
3721 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3722 {
3723 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3724 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3725 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3726 pCtx->SysEnter.cs = u32Val;
3727 }
3728
3729#ifndef IN_NEM_DARWIN
3730 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3731 {
3732 if ( pVM->hmr0.s.fAllow64BitGuests
3733 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3734 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3735 }
3736
3737 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3738 {
3739 if ( pVM->hmr0.s.fAllow64BitGuests
3740 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3741 {
3742 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3743 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3744 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3745 }
3746 }
3747
3748 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3749 {
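            /* Walk the VM-exit MSR-store area: each VMXAUTOMSR entry pairs an MSR index with
               the value the CPU saved for it on VM-exit. */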
3750 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3751 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3752 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3753 Assert(pMsrs);
3754 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3755 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3756 for (uint32_t i = 0; i < cMsrs; i++)
3757 {
3758 uint32_t const idMsr = pMsrs[i].u32Msr;
3759 switch (idMsr)
3760 {
3761 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3762 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3763 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3764 default:
3765 {
3766 uint32_t idxLbrMsr;
3767 if (VM_IS_VMX_LBR(pVM))
3768 {
3769 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3770 {
3771 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3772 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3773 break;
3774 }
3775 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3776 {
3777 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3778 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3779 break;
3780 }
3781 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3782 {
3783 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3784 break;
3785 }
3786 /* Fallthru (no break) */
3787 }
3788 pCtx->fExtrn = 0;
3789 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3790 ASMSetFlags(fEFlags);
3791 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3792 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3793 }
3794 }
3795 }
3796 }
3797#endif
3798
3799 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3800 {
3801 if (fWhat & CPUMCTX_EXTRN_CR0)
3802 {
3803 uint64_t u64Cr0;
3804 uint64_t u64Shadow;
3805 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3806 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3807#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3808 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3809 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3810#else
3811 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3812 {
3813 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3814 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3815 }
3816 else
3817 {
3818 /*
3819 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3820 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3821 * re-construct CR0. See @bugref{9180#c95} for details.
3822 */
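                    /* In other words: bits not owned by either mask come straight from the VMCS,
                       bits owned by the nested-guest mask come from its CR0 read shadow, and bits
                       owned only by the merged (outer) mask come from our CR0 read shadow. The
                       same scheme is used for CR4 further down. */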
3823 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3824 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3825 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3826 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3827 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3828 }
3829#endif
3830#ifndef IN_NEM_DARWIN
3831 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3832#endif
3833 CPUMSetGuestCR0(pVCpu, u64Cr0);
3834#ifndef IN_NEM_DARWIN
3835 VMMRZCallRing3Enable(pVCpu);
3836#endif
3837 }
3838
3839 if (fWhat & CPUMCTX_EXTRN_CR4)
3840 {
3841 uint64_t u64Cr4;
3842 uint64_t u64Shadow;
3843 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3844 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3845#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3846 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3847 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3848#else
3849 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3850 {
3851 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3852 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3853 }
3854 else
3855 {
3856 /*
3857 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3858 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3859 * re-construct CR4. See @bugref{9180#c95} for details.
3860 */
3861 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3862 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3863 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3864 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3865 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3866 }
3867#endif
3868 pCtx->cr4 = u64Cr4;
3869 }
3870
3871 if (fWhat & CPUMCTX_EXTRN_CR3)
3872 {
3873 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3874 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3875 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3876 && CPUMIsGuestPagingEnabledEx(pCtx)))
3877 {
3878 uint64_t u64Cr3;
3879 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3880 if (pCtx->cr3 != u64Cr3)
3881 {
3882 pCtx->cr3 = u64Cr3;
3883 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3884 }
3885
3886 /*
3887 * If the guest is in PAE mode, sync back the PDPEs into the guest state.
3888 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3889 */
3890 if (CPUMIsGuestInPAEModeEx(pCtx))
3891 {
3892 X86PDPE aPaePdpes[4];
3893 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3894 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3895 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3896 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3897 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3898 {
3899 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3900 /* PGM now updates PAE PDPTEs while updating CR3. */
3901 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3902 }
3903 }
3904 }
3905 }
3906 }
3907
3908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3909 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3910 {
3911 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3912 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3913 {
3914 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3915 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3916 if (RT_SUCCESS(rc))
3917 { /* likely */ }
3918 else
3919 break;
3920 }
3921 }
3922#endif
3923 } while (0);
3924
3925 if (RT_SUCCESS(rc))
3926 {
3927 /* Update fExtrn. */
3928 pCtx->fExtrn &= ~fWhat;
3929
3930 /* If everything has been imported, clear the HM keeper bit. */
3931 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3932 {
3933#ifndef IN_NEM_DARWIN
3934 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3935#else
3936 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3937#endif
3938 Assert(!pCtx->fExtrn);
3939 }
3940 }
3941 }
3942#ifndef IN_NEM_DARWIN
3943 else
3944 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3945
3946 /*
3947 * Restore interrupts.
3948 */
3949 ASMSetFlags(fEFlags);
3950#endif
3951
3952 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3953
3954 if (RT_SUCCESS(rc))
3955 { /* likely */ }
3956 else
3957 return rc;
3958
3959 /*
3960 * Honor any pending CR3 updates.
3961 *
3962 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3963 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3964 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3965 *
3966 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3967 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3968 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3969 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3970 *
3971 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3972 *
3973 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3974 */
3975 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3976#ifndef IN_NEM_DARWIN
3977 && VMMRZCallRing3IsEnabled(pVCpu)
3978#endif
3979 )
3980 {
3981 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3982 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3983 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3984 }
3985
3986 return VINF_SUCCESS;
3987}
3988
3989
3990/**
3991 * Check per-VM and per-VCPU force flag actions that require us to go back to
3992 * ring-3 for one reason or another.
3993 *
3994 * @returns Strict VBox status code (i.e. informational status codes too)
3995 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3996 * ring-3.
3997 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3998 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3999 * interrupts)
4000 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4001 * all EMTs to be in ring-3.
4002 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
4003 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4004 * to the EM loop.
4005 *
4006 * @param pVCpu The cross context virtual CPU structure.
4007 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4008 * @param fStepping Whether we are single-stepping the guest using the
4009 * hypervisor debugger.
4010 *
4011 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4012 * is no longer in VMX non-root mode.
4013 */
4014static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4015{
4016#ifndef IN_NEM_DARWIN
4017 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4018#endif
4019
4020 /*
4021 * Update pending interrupts into the APIC's IRR.
4022 */
4023 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4024 APICUpdatePendingInterrupts(pVCpu);
4025
4026 /*
4027 * Anything pending? Should be more likely than not if we're doing a good job.
4028 */
4029 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4030 if ( !fStepping
4031 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4032 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4033 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4034 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4035 return VINF_SUCCESS;
4036
4037 /* Pending PGM CR3 sync. */
4038 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4039 {
4040 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4041 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4042 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4043 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4044 if (rcStrict != VINF_SUCCESS)
4045 {
4046 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4047 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4048 return rcStrict;
4049 }
4050 }
4051
4052 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4053 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4054 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4055 {
4056 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4057 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4058 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4059 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4060 return rc;
4061 }
4062
4063 /* Pending VM request packets, such as hardware interrupts. */
4064 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4065 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4066 {
4067 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4068 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4069 return VINF_EM_PENDING_REQUEST;
4070 }
4071
4072 /* Pending PGM pool flushes. */
4073 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4074 {
4075 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4076 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4077 return VINF_PGM_POOL_FLUSH_PENDING;
4078 }
4079
4080 /* Pending DMA requests. */
4081 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4082 {
4083 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4084 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4085 return VINF_EM_RAW_TO_R3;
4086 }
4087
4088#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4089 /*
4090 * Pending nested-guest events.
4091 *
4092 * Please note that the priority of these events is specified and important.
4093 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4094 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4095 */
4096 if (fIsNestedGuest)
4097 {
4098 /* Pending nested-guest APIC-write. */
4099 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4100 {
4101 Log4Func(("Pending nested-guest APIC-write\n"));
4102 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4103 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4104 return rcStrict;
4105 }
4106
4107 /* Pending nested-guest monitor-trap flag (MTF). */
4108 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4109 {
4110 Log4Func(("Pending nested-guest MTF\n"));
4111 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4112 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4113 return rcStrict;
4114 }
4115
4116 /* Pending nested-guest VMX-preemption timer expired. */
4117 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4118 {
4119 Log4Func(("Pending nested-guest preempt timer\n"));
4120 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4121 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4122 return rcStrict;
4123 }
4124 }
4125#else
4126 NOREF(fIsNestedGuest);
4127#endif
4128
4129 return VINF_SUCCESS;
4130}
4131
4132
4133/**
4134 * Converts any TRPM trap into a pending HM event. This is typically used when
4135 * entering from ring-3 (not longjmp returns).
4136 *
4137 * @param pVCpu The cross context virtual CPU structure.
4138 */
4139static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4140{
4141 Assert(TRPMHasTrap(pVCpu));
4142 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4143
4144 uint8_t uVector;
4145 TRPMEVENT enmTrpmEvent;
4146 uint32_t uErrCode;
4147 RTGCUINTPTR GCPtrFaultAddress;
4148 uint8_t cbInstr;
4149 bool fIcebp;
4150
4151 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4152 AssertRC(rc);
4153
4154 uint32_t u32IntInfo;
4155 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4156 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4157
4158 rc = TRPMResetTrap(pVCpu);
4159 AssertRC(rc);
4160 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4161 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4162
4163 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4164}
4165
4166
4167/**
4168 * Converts the pending HM event into a TRPM trap.
4169 *
4170 * @param pVCpu The cross context virtual CPU structure.
4171 */
4172static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4173{
4174 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4175
4176 /* If a trap was already pending, we did something wrong! */
4177 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4178
4179 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4180 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4181 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4182
4183 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4184
4185 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4186 AssertRC(rc);
4187
4188 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4189 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4190
4191 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4192 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4193 else
4194 {
4195 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4196 switch (uVectorType)
4197 {
4198 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4199 TRPMSetTrapDueToIcebp(pVCpu);
4200 RT_FALL_THRU();
4201 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4202 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4203 {
4204 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4205 || ( uVector == X86_XCPT_BP /* INT3 */
4206 || uVector == X86_XCPT_OF /* INTO */
4207 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4208 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4209 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4210 break;
4211 }
4212 }
4213 }
4214
4215 /* We're now done converting the pending event. */
4216 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4217}
4218
4219
4220/**
4221 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4222 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4223 *
4224 * @param pVCpu The cross context virtual CPU structure.
4225 * @param pVmcsInfo The VMCS info. object.
4226 */
4227static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4228{
4229 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4230 {
4231 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4232 {
4233 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4234 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4235 AssertRC(rc);
4236 }
4237 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4238}
4239
4240
4241/**
4242 * Clears the interrupt-window exiting control in the VMCS.
4243 *
4244 * @param pVCpu The cross context virtual CPU structure.
4245 * @param pVmcsInfo The VMCS info. object.
4246 */
4247DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4248{
4249 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4250 {
4251 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4252 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4253 AssertRC(rc);
4254 }
4255}
4256
4257
4258/**
4259 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4260 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4261 *
4262 * @param pVCpu The cross context virtual CPU structure.
4263 * @param pVmcsInfo The VMCS info. object.
4264 */
4265static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4266{
4267 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4268 {
4269 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4270 {
4271 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4272 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4273 AssertRC(rc);
4274 Log4Func(("Setup NMI-window exiting\n"));
4275 }
4276 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4277}
4278
4279
4280/**
4281 * Clears the NMI-window exiting control in the VMCS.
4282 *
4283 * @param pVCpu The cross context virtual CPU structure.
4284 * @param pVmcsInfo The VMCS info. object.
4285 */
4286DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4287{
4288 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4289 {
4290 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4291 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4292 AssertRC(rc);
4293 }
4294}
4295
4296
4297/**
4298 * Injects an event into the guest upon VM-entry by updating the relevant fields
4299 * in the VM-entry area in the VMCS.
4300 *
4301 * @returns Strict VBox status code (i.e. informational status codes too).
4302 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4303 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4304 *
4305 * @param pVCpu The cross context virtual CPU structure.
4306 * @param pVmcsInfo The VMCS info object.
4307 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4308 * @param pEvent The event being injected.
4309 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4310 * will be updated if necessary. This cannot be NULL.
4311 * @param fStepping Whether we're single-stepping guest execution and should
4312 * return VINF_EM_DBG_STEPPED if the event is injected
4313 * directly (registers modified by us, not by hardware on
4314 * VM-entry).
4315 */
4316static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4317 bool fStepping, uint32_t *pfIntrState)
4318{
4319 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4320 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4321 Assert(pfIntrState);
4322
4323#ifdef IN_NEM_DARWIN
4324 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4325#endif
4326
4327 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4328 uint32_t u32IntInfo = pEvent->u64IntInfo;
4329 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4330 uint32_t const cbInstr = pEvent->cbInstr;
4331 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4332 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4333 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4334
4335#ifdef VBOX_STRICT
4336 /*
4337 * Validate the error-code-valid bit for hardware exceptions.
4338 * No error codes for exceptions in real-mode.
4339 *
4340 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4341 */
4342 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4343 && !CPUMIsGuestInRealModeEx(pCtx))
4344 {
4345 switch (uVector)
4346 {
4347 case X86_XCPT_PF:
4348 case X86_XCPT_DF:
4349 case X86_XCPT_TS:
4350 case X86_XCPT_NP:
4351 case X86_XCPT_SS:
4352 case X86_XCPT_GP:
4353 case X86_XCPT_AC:
4354 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4355 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4356 RT_FALL_THRU();
4357 default:
4358 break;
4359 }
4360 }
4361
4362 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4363 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4364 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4365#endif
4366
4367 RT_NOREF(uVector);
4368 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4369 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4370 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4371 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4372 {
4373 Assert(uVector <= X86_XCPT_LAST);
4374 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4375 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4376 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4377 }
4378 else
4379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4380
4381 /*
4382 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4383 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4384 * interrupt handler in the (real-mode) guest.
4385 *
4386 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4387 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4388 */
4389 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4390 {
4391#ifndef IN_NEM_DARWIN
4392 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4393#endif
4394 {
4395 /*
4396 * For CPUs with unrestricted guest execution enabled and with the guest
4397 * in real-mode, we must not set the deliver-error-code bit.
4398 *
4399 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4400 */
4401 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4402 }
4403#ifndef IN_NEM_DARWIN
4404 else
4405 {
4406 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4407 Assert(PDMVmmDevHeapIsEnabled(pVM));
4408 Assert(pVM->hm.s.vmx.pRealModeTSS);
4409 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4410
4411 /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
4412 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4413 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4414 AssertRCReturn(rc2, rc2);
4415
4416 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4417 size_t const cbIdtEntry = sizeof(X86IDTR16);
4418 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4419 {
4420 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4421 if (uVector == X86_XCPT_DF)
4422 return VINF_EM_RESET;
4423
4424 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4425 No error codes for exceptions in real-mode. */
4426 if (uVector == X86_XCPT_GP)
4427 {
4428 static HMEVENT const s_EventXcptDf
4429 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4430 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4431 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4432 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4433 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4434 }
4435
4436 /*
4437 * If we're injecting an event with no valid IDT entry, inject a #GP.
4438 * No error codes for exceptions in real-mode.
4439 *
4440 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4441 */
4442 static HMEVENT const s_EventXcptGp
4443 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4444 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4445 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4446 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4447 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4448 }
4449
4450 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4451 uint16_t uGuestIp = pCtx->ip;
4452 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4453 {
4454 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4455 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4456 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4457 }
4458 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4459 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4460
4461 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4462 X86IDTR16 IdtEntry;
4463 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4464 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4465 AssertRCReturn(rc2, rc2);
4466
4467 /* Construct the stack frame for the interrupt/exception handler. */
4468 VBOXSTRICTRC rcStrict;
4469 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4470 if (rcStrict == VINF_SUCCESS)
4471 {
4472 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4473 if (rcStrict == VINF_SUCCESS)
4474 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4475 }
4476
4477 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4478 if (rcStrict == VINF_SUCCESS)
4479 {
4480 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4481 pCtx->rip = IdtEntry.offSel;
4482 pCtx->cs.Sel = IdtEntry.uSel;
4483 pCtx->cs.ValidSel = IdtEntry.uSel;
4484 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4485 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4486 && uVector == X86_XCPT_PF)
4487 pCtx->cr2 = GCPtrFault;
4488
4489 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4490 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4491 | HM_CHANGED_GUEST_RSP);
4492
4493 /*
4494 * If we delivered a hardware exception (other than an NMI) and if there was
4495 * block-by-STI in effect, we should clear it.
4496 */
4497 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4498 {
4499 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4500 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4501 Log4Func(("Clearing inhibition due to STI\n"));
4502 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4503 }
4504
4505 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4506 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4507
4508 /*
4509 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4510 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4511 */
4512 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4513
4514 /*
4515 * If we eventually support nested-guest execution without unrestricted guest execution,
4516 * we should set fInterceptEvents here.
4517 */
4518 Assert(!fIsNestedGuest);
4519
4520 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4521 if (fStepping)
4522 rcStrict = VINF_EM_DBG_STEPPED;
4523 }
4524 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4525 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4526 return rcStrict;
4527 }
4528#else
4529 RT_NOREF(pVmcsInfo);
4530#endif
4531 }
4532
4533 /*
4534 * Validate.
4535 */
4536 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4537 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4538
4539 /*
4540 * Inject the event into the VMCS.
4541 */
4542 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4543 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4544 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4545 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4546 AssertRC(rc);
4547
4548 /*
4549 * Update guest CR2 if this is a page-fault.
4550 */
4551 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4552 pCtx->cr2 = GCPtrFault;
4553
4554 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4555 return VINF_SUCCESS;
4556}
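
/*
 * Illustrative sketch (hypothetical caller, not lifted from the rest of this file):
 * the HMEVENT handed to vmxHCInjectEventVmcs() carries its interruption-information
 * in the same bit-field layout this function writes to the VMCS. A hardware #UD
 * without an error code could be described just like the static #DF/#GP events above:
 *
 *     uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 *
 * wrapped via HMEVENT_INIT_ONLY_INT_INFO() as done for s_EventXcptDf/s_EventXcptGp.
 * The error-code and instruction-length VMCS fields only matter for event types that
 * actually use them.
 */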
4557
4558
4559/**
4560 * Evaluates the event to be delivered to the guest and sets it as the pending
4561 * event.
4562 *
4563 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4564 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4565 * NOT restore these force-flags.
4566 *
4567 * @returns Strict VBox status code (i.e. informational status codes too).
4568 * @param pVCpu The cross context virtual CPU structure.
4569 * @param pVmcsInfo The VMCS information structure.
4570 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4571 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4572 */
4573static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4574{
4575 Assert(pfIntrState);
4576 Assert(!TRPMHasTrap(pVCpu));
4577
4578 /*
4579 * Compute/update guest-interruptibility state related FFs.
4580 * The FFs will be used below while evaluating events to be injected.
4581 */
4582 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4583
4584 /*
4585 * Evaluate if a new event needs to be injected.
4586 * An event that's already pending has already had all necessary checks performed.
4587 */
4588 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4589 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4590 {
4591 /** @todo SMI. SMIs take priority over NMIs. */
4592
4593 /*
4594 * NMIs.
4595 * NMIs take priority over external interrupts.
4596 */
4597#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4598 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4599#endif
4600 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4601 {
4602 /*
4603 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4604 *
4605 * For a nested-guest, the FF always indicates the outer guest's ability to
4606 * receive an NMI while the guest-interruptibility state bit depends on whether
4607 * the nested-hypervisor is using virtual-NMIs.
4608 */
4609 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4610 {
4611#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4612 if ( fIsNestedGuest
4613 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4614 return IEMExecVmxVmexitXcptNmi(pVCpu);
4615#endif
4616 vmxHCSetPendingXcptNmi(pVCpu);
4617 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4618 Log4Func(("NMI pending injection\n"));
4619
4620 /* We've injected the NMI, bail. */
4621 return VINF_SUCCESS;
4622 }
4623 else if (!fIsNestedGuest)
4624 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4625 }
4626
4627 /*
4628 * External interrupts (PIC/APIC).
4629 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4630 * We cannot re-request the interrupt from the controller again.
4631 */
4632 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4633 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4634 {
4635 Assert(!DBGFIsStepping(pVCpu));
4636 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4637 AssertRC(rc);
4638
4639 /*
4640 * We must not check EFLAGS directly when executing a nested-guest, use
4641 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4642 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4643 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4644 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4645 *
4646 * See Intel spec. 25.4.1 "Event Blocking".
4647 */
4648 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4649 {
4650#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4651 if ( fIsNestedGuest
4652 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4653 {
4654 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4655 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4656 return rcStrict;
4657 }
4658#endif
4659 uint8_t u8Interrupt;
4660 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4661 if (RT_SUCCESS(rc))
4662 {
4663#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4664 if ( fIsNestedGuest
4665 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4666 {
4667 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4668 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4669 return rcStrict;
4670 }
4671#endif
4672 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4673 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4674 }
4675 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4676 {
4677 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4678
4679 if ( !fIsNestedGuest
4680 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4681 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4682 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4683
4684 /*
4685 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4686 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4687 * need to re-set this force-flag here.
4688 */
4689 }
4690 else
4691 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4692
4693 /* We've injected the interrupt or taken necessary action, bail. */
4694 return VINF_SUCCESS;
4695 }
4696 if (!fIsNestedGuest)
4697 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4698 }
4699 }
4700 else if (!fIsNestedGuest)
4701 {
4702 /*
4703 * An event is being injected or we are in an interrupt shadow. Check if another event is
4704 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4705 * the pending event.
4706 */
4707 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4708 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4709 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4710 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4711 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4712 }
4713 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4714
4715 return VINF_SUCCESS;
4716}
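
/*
 * Summary of the evaluation order implemented above: SMIs (still a to-do) would take
 * priority over NMIs, and NMIs take priority over PIC/APIC external interrupts. When
 * the guest cannot currently accept the pending source, the corresponding NMI-window
 * or interrupt-window exiting control is armed instead so that a VM-exit occurs as
 * soon as the guest becomes ready (nested-guests pick these up while merging VMCS
 * controls).
 */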
4717
4718
4719/**
4720 * Injects any pending events into the guest if the guest is in a state to
4721 * receive them.
4722 *
4723 * @returns Strict VBox status code (i.e. informational status codes too).
4724 * @param pVCpu The cross context virtual CPU structure.
4725 * @param pVmcsInfo The VMCS information structure.
4726 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4727 * @param fIntrState The VT-x guest-interruptibility state.
4728 * @param fStepping Whether we are single-stepping the guest using the
4729 * hypervisor debugger and should return
4730 * VINF_EM_DBG_STEPPED if the event was dispatched
4731 * directly.
4732 */
4733static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4734 uint32_t fIntrState, bool fStepping)
4735{
4736 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4737#ifndef IN_NEM_DARWIN
4738 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4739#endif
4740
4741#ifdef VBOX_STRICT
4742 /*
4743 * Verify guest-interruptibility state.
4744 *
4745 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4746 * since injecting an event may modify the interruptibility state and we must thus always
4747 * use fIntrState.
4748 */
4749 {
4750 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4751 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4752 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4753 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4754 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
4755 Assert(!TRPMHasTrap(pVCpu));
4756 NOREF(fBlockMovSS); NOREF(fBlockSti);
4757 }
4758#endif
4759
4760 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4761 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4762 {
4763 /*
4764 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4765 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4766 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4767 *
4768 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4769 */
4770 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4771#ifdef VBOX_STRICT
4772 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4773 {
4774 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4775 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4776 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4777 }
4778 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4779 {
4780 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4781 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4782 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4783 }
4784#endif
4785 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4786 uIntType));
4787
4788 /*
4789 * Inject the event and get any changes to the guest-interruptibility state.
4790 *
4791 * The guest-interruptibility state may need to be updated if we inject the event
4792 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4793 */
4794 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4795 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4796
4797 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4798 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4799 else
4800 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4801 }
4802
4803 /*
4804 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4805 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4806 */
4807 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4808 && !fIsNestedGuest)
4809 {
4810 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4811
4812 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4813 {
4814 /*
4815 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4816 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4817 */
4818 Assert(!DBGFIsStepping(pVCpu));
4819 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4820 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4821 AssertRC(rc);
4822 }
4823 else
4824 {
4825 /*
4826 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4827 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4828 * we take care of this case in vmxHCExportSharedDebugState, and also of the case where
4829 * we use MTF, so just make sure it's called before executing guest code.
4830 */
4831 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4832 }
4833 }
4834 /* else: for nested-guests this is currently handled while merging VMCS controls. */
4835
4836 /*
4837 * Finally, update the guest-interruptibility state.
4838 *
4839 * This is required for the real-on-v86 software interrupt injection, for
4840 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4841 */
4842 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4843 AssertRC(rc);
4844
4845 /*
4846 * There's no need to clear the VM-entry interruption-information field here if we're not
4847 * injecting anything. VT-x clears the valid bit on every VM-exit.
4848 *
4849 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4850 */
4851
4852 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4853 return rcStrict;
4854}
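
/*
 * Presumably used together with vmxHCEvaluatePendingEvent() above: that function
 * decides which event (if any) becomes pending and produces the guest-interruptibility
 * state that is passed in here as fIntrState for the actual injection into the VMCS.
 */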
4855
4856
4857/**
4858 * Tries to determine what part of the guest-state VT-x has deemed invalid
4859 * and updates the error record fields accordingly.
4860 *
4861 * @returns VMX_IGS_* error codes.
4862 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4863 * wrong with the guest state.
4864 *
4865 * @param pVCpu The cross context virtual CPU structure.
4866 * @param pVmcsInfo The VMCS info. object.
4867 *
4868 * @remarks This function assumes our cache of the VMCS controls
4869 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4870 */
4871static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4872{
4873#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4874#define HMVMX_CHECK_BREAK(expr, err) do { \
4875 if (!(expr)) { uError = (err); break; } \
4876 } while (0)
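/* Note: both macros rely on the enclosing do { ... } while (0) below; 'break' leaves
   that loop with uError set instead of returning from the function directly. */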
4877
4878 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4879 uint32_t uError = VMX_IGS_ERROR;
4880 uint32_t u32IntrState = 0;
4881#ifndef IN_NEM_DARWIN
4882 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4883 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4884#else
4885 bool const fUnrestrictedGuest = true;
4886#endif
4887 do
4888 {
4889 int rc;
4890
4891 /*
4892 * Guest-interruptibility state.
4893 *
4894 * Read this first so that even when a check that fails does not itself
4895 * require the guest-interruptibility state, the error record still reflects
4896 * the correct VMCS value and avoids causing further confusion.
4897 */
4898 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4899 AssertRC(rc);
4900
4901 uint32_t u32Val;
4902 uint64_t u64Val;
4903
4904 /*
4905 * CR0.
4906 */
4907 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4908 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4909 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4910 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4911 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4912 if (fUnrestrictedGuest)
4913 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4914
4915 uint64_t u64GuestCr0;
4916 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4917 AssertRC(rc);
4918 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4919 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4920 if ( !fUnrestrictedGuest
4921 && (u64GuestCr0 & X86_CR0_PG)
4922 && !(u64GuestCr0 & X86_CR0_PE))
4923 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
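
 /*
  * Informal note on the fixed-bit MSRs used here (and for CR4 below): a bit that is 1 in
  * IA32_VMX_CR0_FIXED0 must be 1 in CR0, and a bit that is 0 in IA32_VMX_CR0_FIXED1 must
  * be 0 in CR0. Thus fSet = FIXED0 & FIXED1 is the must-be-one mask and fZap = FIXED0 |
  * FIXED1 is the may-be-one mask. E.g. with the typical values FIXED0=0x80000021 and
  * FIXED1=0xffffffff, CR0.PE, CR0.NE and CR0.PG must be 1 while no bit is forced to 0.
  * See Intel spec. A.7 "VMX-Fixed Bits in CR0" and A.8 "VMX-Fixed Bits in CR4".
  */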
4924
4925 /*
4926 * CR4.
4927 */
4928 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4929 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4930 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4931
4932 uint64_t u64GuestCr4;
4933 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4934 AssertRC(rc);
4935 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4936 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4937
4938 /*
4939 * IA32_DEBUGCTL MSR.
4940 */
4941 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4942 AssertRC(rc);
4943 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4944 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4945 {
4946 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4947 }
4948 uint64_t u64DebugCtlMsr = u64Val;
4949
4950#ifdef VBOX_STRICT
4951 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4952 AssertRC(rc);
4953 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4954#endif
4955 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4956
4957 /*
4958 * RIP and RFLAGS.
4959 */
4960 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4961 AssertRC(rc);
4962 /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
4963 if ( !fLongModeGuest
4964 || !pCtx->cs.Attr.n.u1Long)
4965 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4966 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4967 * must be identical if the "IA-32e mode guest" VM-entry
4968 * control is 1 and CS.L is 1. No check applies if the
4969 * CPU supports 64 linear-address bits. */
4970
4971 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4972 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4973 AssertRC(rc);
4974 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4975 VMX_IGS_RFLAGS_RESERVED);
4976 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4977 uint32_t const u32Eflags = u64Val;
4978
4979 if ( fLongModeGuest
4980 || ( fUnrestrictedGuest
4981 && !(u64GuestCr0 & X86_CR0_PE)))
4982 {
4983 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4984 }
4985
4986 uint32_t u32EntryInfo;
4987 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4988 AssertRC(rc);
4989 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4990 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4991
4992 /*
4993 * 64-bit checks.
4994 */
4995 if (fLongModeGuest)
4996 {
4997 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4998 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4999 }
5000
5001 if ( !fLongModeGuest
5002 && (u64GuestCr4 & X86_CR4_PCIDE))
5003 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5004
5005 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5006 * 51:32 beyond the processor's physical-address width are 0. */
5007
5008 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5009 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5010 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5011
5012#ifndef IN_NEM_DARWIN
5013 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5014 AssertRC(rc);
5015 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5016
5017 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5018 AssertRC(rc);
5019 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5020#endif
5021
5022 /*
5023 * PERF_GLOBAL MSR.
5024 */
5025 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5026 {
5027 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5028 AssertRC(rc);
5029 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5030 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5031 }
5032
5033 /*
5034 * PAT MSR.
5035 */
5036 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5037 {
5038 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5039 AssertRC(rc);
5040 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5041 for (unsigned i = 0; i < 8; i++)
5042 {
5043 uint8_t u8Val = (u64Val & 0xff);
5044 if ( u8Val != 0 /* UC */
5045 && u8Val != 1 /* WC */
5046 && u8Val != 4 /* WT */
5047 && u8Val != 5 /* WP */
5048 && u8Val != 6 /* WB */
5049 && u8Val != 7 /* UC- */)
5050 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5051 u64Val >>= 8;
5052 }
5053 }
5054
5055 /*
5056 * EFER MSR.
5057 */
5058 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5059 {
5060 Assert(g_fHmVmxSupportsVmcsEfer);
5061 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5062 AssertRC(rc);
5063 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5064 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5065 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5066 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5067 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5068 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5069 * iemVmxVmentryCheckGuestState(). */
5070 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5071 || !(u64GuestCr0 & X86_CR0_PG)
5072 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5073 VMX_IGS_EFER_LMA_LME_MISMATCH);
5074 }
5075
5076 /*
5077 * Segment registers.
5078 */
5079 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5080 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5081 if (!(u32Eflags & X86_EFL_VM))
5082 {
5083 /* CS */
5084 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5085 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5086 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5087 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5088 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5089 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5090 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5091 /* CS cannot be loaded with NULL in protected mode. */
5092 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5093 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5094 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5095 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5096 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5097 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5098 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5099 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5100 else
5101 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5102
5103 /* SS */
5104 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5105 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5106 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5107 if ( !(pCtx->cr0 & X86_CR0_PE)
5108 || pCtx->cs.Attr.n.u4Type == 3)
5109 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5110
5111 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5112 {
5113 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5114 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5115 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5116 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5117 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5118 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5119 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5120 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5121 }
5122
5123 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5124 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5125 {
5126 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5127 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5128 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5129 || pCtx->ds.Attr.n.u4Type > 11
5130 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5131 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5132 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5133 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5134 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5135 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5136 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5137 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5138 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5139 }
5140 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5141 {
5142 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5143 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5144 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5145 || pCtx->es.Attr.n.u4Type > 11
5146 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5147 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5148 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5149 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5150 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5151 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5152 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5153 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5154 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5155 }
5156 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5157 {
5158 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5159 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5160 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5161 || pCtx->fs.Attr.n.u4Type > 11
5162 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5163 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5164 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5165 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5166 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5167 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5168 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5169 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5170 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5171 }
5172 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5173 {
5174 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5175 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5176 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5177 || pCtx->gs.Attr.n.u4Type > 11
5178 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5179 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5180 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5181 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5182 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5183 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5184 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5185 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5186 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5187 }
5188 /* 64-bit capable CPUs. */
5189 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5190 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5191 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5192 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5193 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5194 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5195 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5196 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5197 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5198 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5199 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5200 }
5201 else
5202 {
5203 /* V86 mode checks. */
5204 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5205 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5206 {
5207 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5208 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5209 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5210 }
5211 else
5212 {
5213 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5214 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5215 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5216 }
5217
5218 /* CS */
5219 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5220 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5221 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5222 /* SS */
5223 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5224 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5225 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5226 /* DS */
5227 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5228 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5229 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5230 /* ES */
5231 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5232 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5233 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5234 /* FS */
5235 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5236 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5237 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5238 /* GS */
5239 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5240 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5241 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5242 /* 64-bit capable CPUs. */
5243 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5244 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5245 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5246 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5247 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5248 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5249 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5250 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5251 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5252 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5253 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5254 }
5255
5256 /*
5257 * TR.
5258 */
5259 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5260 /* 64-bit capable CPUs. */
5261 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5262 if (fLongModeGuest)
5263 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5264 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5265 else
5266 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5267 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5268 VMX_IGS_TR_ATTR_TYPE_INVALID);
5269 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5270 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5271 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5272 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5273 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5274 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5275 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5276 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5277
5278 /*
5279 * GDTR and IDTR (64-bit capable checks).
5280 */
5281 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5282 AssertRC(rc);
5283 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5284
5285 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5286 AssertRC(rc);
5287 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5288
5289 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5290 AssertRC(rc);
5291 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5292
5293 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5294 AssertRC(rc);
5295 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5296
5297 /*
5298 * Guest Non-Register State.
5299 */
5300 /* Activity State. */
5301 uint32_t u32ActivityState;
5302 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5303 AssertRC(rc);
5304 HMVMX_CHECK_BREAK( !u32ActivityState
5305 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5306 VMX_IGS_ACTIVITY_STATE_INVALID);
5307 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5308 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5309
5310 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5311 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5312 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5313
5314 /** @todo Activity state and injecting interrupts. Left as a todo since we
5315 * currently don't use any activity state other than ACTIVE. */
5316
5317 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5318 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5319
5320 /* Guest interruptibility-state. */
5321 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5322 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5323 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5324 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5325 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5326 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5327 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5328 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5329 {
5330 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5331 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5332 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5333 }
5334 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5335 {
5336 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5337 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5338 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5339 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5340 }
5341 /** @todo Assumes the processor is not in SMM. */
5342 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5343 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5344 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5345 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5346 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5347 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5348 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5349 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5350
5351 /* Pending debug exceptions. */
5352 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5353 AssertRC(rc);
5354 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5355 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5356 u32Val = u64Val; /* For pending debug exceptions checks below. */
5357
5358 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5359 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5360 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5361 {
5362 if ( (u32Eflags & X86_EFL_TF)
5363 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5364 {
5365 /* Bit 14 is PendingDebug.BS. */
5366 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5367 }
5368 if ( !(u32Eflags & X86_EFL_TF)
5369 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5370 {
5371 /* Bit 14 is PendingDebug.BS. */
5372 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5373 }
5374 }
5375
5376#ifndef IN_NEM_DARWIN
5377 /* VMCS link pointer. */
5378 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5379 AssertRC(rc);
5380 if (u64Val != UINT64_C(0xffffffffffffffff))
5381 {
5382 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5383 /** @todo Bits beyond the processor's physical-address width MBZ. */
5384 /** @todo SMM checks. */
5385 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5386 Assert(pVmcsInfo->pvShadowVmcs);
5387 VMXVMCSREVID VmcsRevId;
5388 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5389 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5390 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5391 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5392 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5393 }
5394
5395 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5396 * not using nested paging? */
5397 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5398 && !fLongModeGuest
5399 && CPUMIsGuestInPAEModeEx(pCtx))
5400 {
5401 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5402 AssertRC(rc);
5403 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5404
5405 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5406 AssertRC(rc);
5407 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5408
5409 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5410 AssertRC(rc);
5411 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5412
5413 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5414 AssertRC(rc);
5415 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5416 }
5417#endif
5418
5419 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5420 if (uError == VMX_IGS_ERROR)
5421 uError = VMX_IGS_REASON_NOT_FOUND;
5422 } while (0);
5423
5424 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5425 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5426 return uError;
5427
5428#undef HMVMX_ERROR_BREAK
5429#undef HMVMX_CHECK_BREAK
5430}
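
/*
 * Hypothetical usage sketch (the actual call sites are outside this excerpt): a
 * VM-exit path dealing with an invalid-guest-state failure could consult this
 * function roughly like so:
 *
 *     uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
 *     if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
 *         Log4(("Guest state check failed: %#x\n", uInvalidReason));
 *
 * The same value is also recorded in VCPU_2_VMXSTATE(pVCpu).u32HMError above.
 */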
5431
5432
5433#ifndef HMVMX_USE_FUNCTION_TABLE
5434/**
5435 * Handles a guest VM-exit from hardware-assisted VMX execution.
5436 *
5437 * @returns Strict VBox status code (i.e. informational status codes too).
5438 * @param pVCpu The cross context virtual CPU structure.
5439 * @param pVmxTransient The VMX-transient structure.
5440 */
5441DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5442{
5443#ifdef DEBUG_ramshankar
5444# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5445 do { \
5446 if (a_fSave != 0) \
5447 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5448 VBOXSTRICTRC rcStrict = a_CallExpr; \
5449 if (a_fSave != 0) \
5450 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5451 return rcStrict; \
5452 } while (0)
5453#else
5454# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5455#endif
5456 uint32_t const uExitReason = pVmxTransient->uExitReason;
5457 switch (uExitReason)
5458 {
5459 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5460 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5461 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5462 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5463 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5464 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5465 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5466 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5467 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5468 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5469 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5470 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5471 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5472 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5473 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5474 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5475 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5476 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5477 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5478 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5479 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5480 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5481 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5482 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5483 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5484 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5485 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5486 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5487 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5488 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5489#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5490 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5491 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5492 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5493 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5494 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5495 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5496 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5497 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5498 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5499 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5500#else
5501 case VMX_EXIT_VMCLEAR:
5502 case VMX_EXIT_VMLAUNCH:
5503 case VMX_EXIT_VMPTRLD:
5504 case VMX_EXIT_VMPTRST:
5505 case VMX_EXIT_VMREAD:
5506 case VMX_EXIT_VMRESUME:
5507 case VMX_EXIT_VMWRITE:
5508 case VMX_EXIT_VMXOFF:
5509 case VMX_EXIT_VMXON:
5510 case VMX_EXIT_INVVPID:
5511 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5512#endif
5513#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5514 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5515#else
5516 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5517#endif
5518
5519 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5520 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5521 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5522
5523 case VMX_EXIT_INIT_SIGNAL:
5524 case VMX_EXIT_SIPI:
5525 case VMX_EXIT_IO_SMI:
5526 case VMX_EXIT_SMI:
5527 case VMX_EXIT_ERR_MSR_LOAD:
5528 case VMX_EXIT_ERR_MACHINE_CHECK:
5529 case VMX_EXIT_PML_FULL:
5530 case VMX_EXIT_VIRTUALIZED_EOI:
5531 case VMX_EXIT_GDTR_IDTR_ACCESS:
5532 case VMX_EXIT_LDTR_TR_ACCESS:
5533 case VMX_EXIT_APIC_WRITE:
5534 case VMX_EXIT_RDRAND:
5535 case VMX_EXIT_RSM:
5536 case VMX_EXIT_VMFUNC:
5537 case VMX_EXIT_ENCLS:
5538 case VMX_EXIT_RDSEED:
5539 case VMX_EXIT_XSAVES:
5540 case VMX_EXIT_XRSTORS:
5541 case VMX_EXIT_UMWAIT:
5542 case VMX_EXIT_TPAUSE:
5543 case VMX_EXIT_LOADIWKEY:
5544 default:
5545 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5546 }
5547#undef VMEXIT_CALL_RET
5548}
5549#endif /* !HMVMX_USE_FUNCTION_TABLE */
5550
5551
5552#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5553/**
5554 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5555 *
5556 * @returns Strict VBox status code (i.e. informational status codes too).
5557 * @param pVCpu The cross context virtual CPU structure.
5558 * @param pVmxTransient The VMX-transient structure.
5559 */
5560DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5561{
5562 uint32_t const uExitReason = pVmxTransient->uExitReason;
5563 switch (uExitReason)
5564 {
5565# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5566 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5567 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5568# else
5569 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5570 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5571# endif
5572 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5573 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5574 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5575
5576 /*
5577 * We shouldn't direct host physical interrupts to the nested-guest.
5578 */
5579 case VMX_EXIT_EXT_INT:
5580 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5581
5582 /*
5583 * Instructions that cause VM-exits unconditionally, or whose exit condition is
5584 * taken solely from the nested hypervisor (meaning that if the VM-exit
5585 * happens, it's guaranteed to be a nested-guest VM-exit).
5586 *
5587 * - Provides VM-exit instruction length ONLY.
5588 */
5589 case VMX_EXIT_CPUID: /* Unconditional. */
5590 case VMX_EXIT_VMCALL:
5591 case VMX_EXIT_GETSEC:
5592 case VMX_EXIT_INVD:
5593 case VMX_EXIT_XSETBV:
5594 case VMX_EXIT_VMLAUNCH:
5595 case VMX_EXIT_VMRESUME:
5596 case VMX_EXIT_VMXOFF:
5597 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5598 case VMX_EXIT_VMFUNC:
5599 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5600
5601 /*
5602 * Instructions that cause VM-exits unconditionally, or whose exit condition is
5603 * taken solely from the nested hypervisor (meaning that if the VM-exit
5604 * happens, it's guaranteed to be a nested-guest VM-exit).
5605 *
5606 * - Provides VM-exit instruction length.
5607 * - Provides VM-exit information.
5608 * - Optionally provides Exit qualification.
5609 *
5610 * Since Exit qualification is 0 for all VM-exits where it is not
5611 * applicable, reading and passing it to the guest should produce
5612 * defined behavior.
5613 *
5614 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5615 */
5616 case VMX_EXIT_INVEPT: /* Unconditional. */
5617 case VMX_EXIT_INVVPID:
5618 case VMX_EXIT_VMCLEAR:
5619 case VMX_EXIT_VMPTRLD:
5620 case VMX_EXIT_VMPTRST:
5621 case VMX_EXIT_VMXON:
5622 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5623 case VMX_EXIT_LDTR_TR_ACCESS:
5624 case VMX_EXIT_RDRAND:
5625 case VMX_EXIT_RDSEED:
5626 case VMX_EXIT_XSAVES:
5627 case VMX_EXIT_XRSTORS:
5628 case VMX_EXIT_UMWAIT:
5629 case VMX_EXIT_TPAUSE:
5630 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5631
5632 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5633 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5634 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5635 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5636 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5637 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5638 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5639 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5640 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5641 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5642 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5643 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5644 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5645 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5646 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5647 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5648 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5649 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5650 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5651
5652 case VMX_EXIT_PREEMPT_TIMER:
5653 {
5654 /** @todo NSTVMX: Preempt timer. */
5655 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5656 }
5657
5658 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5659 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5660
5661 case VMX_EXIT_VMREAD:
5662 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5663
5664 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5665 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5666
5667 case VMX_EXIT_INIT_SIGNAL:
5668 case VMX_EXIT_SIPI:
5669 case VMX_EXIT_IO_SMI:
5670 case VMX_EXIT_SMI:
5671 case VMX_EXIT_ERR_MSR_LOAD:
5672 case VMX_EXIT_ERR_MACHINE_CHECK:
5673 case VMX_EXIT_PML_FULL:
5674 case VMX_EXIT_RSM:
5675 default:
5676 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5677 }
5678}
5679#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5680
5681
5682/** @name VM-exit helpers.
5683 * @{
5684 */
5685/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5686/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5687/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5688
5689/** Macro for VM-exits called unexpectedly. */
5690#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5691 do { \
5692 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5693 return VERR_VMX_UNEXPECTED_EXIT; \
5694 } while (0)
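
/* Illustrative (hypothetical) use: a handler that lands on an exit reason it cannot
   deal with may do HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason); */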
5695
5696#ifdef VBOX_STRICT
5697# ifndef IN_NEM_DARWIN
5698/* Is there some generic IPRT define for this that isn't in Runtime/internal/\* ?? */
5699# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5700 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5701
5702# define HMVMX_ASSERT_PREEMPT_CPUID() \
5703 do { \
5704 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5705 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5706 } while (0)
5707
5708# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5709 do { \
5710 AssertPtr((a_pVCpu)); \
5711 AssertPtr((a_pVmxTransient)); \
5712 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5713 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5714 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5715 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5716 Assert((a_pVmxTransient)->pVmcsInfo); \
5717 Assert(ASMIntAreEnabled()); \
5718 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5719 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5720 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5721 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5722 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5723 HMVMX_ASSERT_PREEMPT_CPUID(); \
5724 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5725 } while (0)
5726# else
5727# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5728# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5729# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5730 do { \
5731 AssertPtr((a_pVCpu)); \
5732 AssertPtr((a_pVmxTransient)); \
5733 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5734 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5735 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5736 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5737 Assert((a_pVmxTransient)->pVmcsInfo); \
5738 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5739 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5740 } while (0)
5741# endif
5742
5743# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5744 do { \
5745 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5746 Assert((a_pVmxTransient)->fIsNestedGuest); \
5747 } while (0)
5748
5749# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5750 do { \
5751 Log4Func(("\n")); \
5752 } while (0)
5753#else
5754# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5755 do { \
5756 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5757 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5758 } while (0)
5759
5760# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5761 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5762
5763# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5764#endif
5765
5766#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5767/** Macro that performs the necessary privilege checks for VM-exits caused by a
5768 * guest attempting to execute a VMX instruction. */
5769# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5770 do \
5771 { \
5772 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5773 if (rcStrictTmp == VINF_SUCCESS) \
5774 { /* likely */ } \
5775 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5776 { \
5777 Assert((a_pVCpu)->hm.s.Event.fPending); \
5778 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5779 return VINF_SUCCESS; \
5780 } \
5781 else \
5782 { \
5783 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5784 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5785 } \
5786 } while (0)
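/* Illustrative sketch only (handler shape hypothetical): a VMX-instruction exit handler
 * would use the macro above roughly like this before deferring to IEM:
 *
 *     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     // ... decode any memory operand and hand the instruction over to IEM ...
 */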
5787
5788/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5789# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5790 do \
5791 { \
5792 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5793 (a_pGCPtrEffAddr)); \
5794 if (rcStrictTmp == VINF_SUCCESS) \
5795 { /* likely */ } \
5796 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5797 { \
5798 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5799 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5800 NOREF(uXcptTmp); \
5801 return VINF_SUCCESS; \
5802 } \
5803 else \
5804 { \
5805 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5806 return rcStrictTmp; \
5807 } \
5808 } while (0)
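/* Illustrative sketch only (the destination variable is hypothetical, and it is assumed
 * the ExitInstrInfo and uExitQual transient fields have already been read): combining the
 * two macros above in a handler for a VMX instruction that takes a memory operand:
 *
 *     RTGCPTR GCPtrEffAddr;
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 *                              VMXMEMACCESS_READ, &GCPtrEffAddr);
 */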
5809#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5810
5811
5812/**
5813 * Advances the guest RIP by the specified number of bytes.
5814 *
5815 * @param pVCpu The cross context virtual CPU structure.
5816 * @param cbInstr Number of bytes to advance the RIP by.
5817 *
5818 * @remarks No-long-jump zone!!!
5819 */
5820DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5821{
5822 /* Advance the RIP. */
5823 pVCpu->cpum.GstCtx.rip += cbInstr;
5824 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5825
5826 /* Update interrupt inhibition. */
5827 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5828 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5829 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5830}
5831
5832
5833/**
5834 * Advances the guest RIP after reading it from the VMCS.
5835 *
5836 * @returns VBox status code, no informational status codes.
5837 * @param pVCpu The cross context virtual CPU structure.
5838 * @param pVmxTransient The VMX-transient structure.
5839 *
5840 * @remarks No-long-jump zone!!!
5841 */
5842static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5843{
5844 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
5845 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5846 AssertRCReturn(rc, rc);
5847
5848 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5849 return VINF_SUCCESS;
5850}
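/* Illustrative sketch only: exit handlers that merely need to skip the offending
 * instruction typically reduce to the following pattern (the Mesa-driver #GP handling
 * further down simply forwards to this helper):
 *
 *     int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
 *     AssertRCReturn(rc, rc);
 */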
5851
5852
5853/**
5854 * Handles a condition that occurred while delivering an event through the guest or
5855 * nested-guest IDT.
5856 *
5857 * @returns Strict VBox status code (i.e. informational status codes too).
5858 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5859 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5860 *         to continue execution of the guest which will deliver the \#DF.
5861 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5862 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5863 *
5864 * @param pVCpu The cross context virtual CPU structure.
5865 * @param pVmxTransient The VMX-transient structure.
5866 *
5867 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5868 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5869 * is due to an EPT violation, PML full or SPP-related event.
5870 *
5871 * @remarks No-long-jump zone!!!
5872 */
5873static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5874{
5875 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5876 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5877 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5878 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5879 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5880 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5881
5882 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5883 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5884 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5885 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5886 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5887 {
5888 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5889 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5890
5891 /*
5892 * If the event was a software interrupt (generated with INT n) or a software exception
5893 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5894 * can handle the VM-exit and continue guest execution which will re-execute the
5895 * instruction rather than re-injecting the exception, as that can cause premature
5896 * trips to ring-3 before injection and involve TRPM which currently has no way of
5897 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5898 * the problem).
5899 */
5900 IEMXCPTRAISE enmRaise;
5901 IEMXCPTRAISEINFO fRaiseInfo;
5902 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5903 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5904 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5905 {
5906 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5907 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5908 }
5909 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5910 {
5911 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5912 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5913 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5914
5915 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5916 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5917
5918 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5919
5920 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5921 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5922 {
5923 pVmxTransient->fVectoringPF = true;
5924 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5925 }
5926 }
5927 else
5928 {
5929 /*
5930 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5931 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5932 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5933 */
5934 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5935 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5936 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5937 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5938 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5939 }
5940
5941 /*
5942 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5943 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5944 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5945 * subsequent VM-entry would fail, see @bugref{7445}.
5946 *
5947 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5948 */
5949 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5950 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5951 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5952 && CPUMIsGuestNmiBlocking(pVCpu))
5953 {
5954 CPUMSetGuestNmiBlocking(pVCpu, false);
5955 }
5956
5957 switch (enmRaise)
5958 {
5959 case IEMXCPTRAISE_CURRENT_XCPT:
5960 {
5961 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5962 Assert(rcStrict == VINF_SUCCESS);
5963 break;
5964 }
5965
5966 case IEMXCPTRAISE_PREV_EVENT:
5967 {
5968 uint32_t u32ErrCode;
5969 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5970 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5971 else
5972 u32ErrCode = 0;
5973
5974 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5975 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5976 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
5977 pVCpu->cpum.GstCtx.cr2);
5978
5979 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5980 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5981 Assert(rcStrict == VINF_SUCCESS);
5982 break;
5983 }
5984
5985 case IEMXCPTRAISE_REEXEC_INSTR:
5986 Assert(rcStrict == VINF_SUCCESS);
5987 break;
5988
5989 case IEMXCPTRAISE_DOUBLE_FAULT:
5990 {
5991 /*
5992 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5993 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5994 */
5995 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5996 {
5997 pVmxTransient->fVectoringDoublePF = true;
5998 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5999 pVCpu->cpum.GstCtx.cr2));
6000 rcStrict = VINF_SUCCESS;
6001 }
6002 else
6003 {
6004 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6005 vmxHCSetPendingXcptDF(pVCpu);
6006 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6007 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6008 rcStrict = VINF_HM_DOUBLE_FAULT;
6009 }
6010 break;
6011 }
6012
6013 case IEMXCPTRAISE_TRIPLE_FAULT:
6014 {
6015 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6016 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6017 rcStrict = VINF_EM_RESET;
6018 break;
6019 }
6020
6021 case IEMXCPTRAISE_CPU_HANG:
6022 {
6023 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6024 rcStrict = VERR_EM_GUEST_CPU_HANG;
6025 break;
6026 }
6027
6028 default:
6029 {
6030 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6031 rcStrict = VERR_VMX_IPE_2;
6032 break;
6033 }
6034 }
6035 }
6036 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6037 && !CPUMIsGuestNmiBlocking(pVCpu))
6038 {
6039 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6040 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6041 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6042 {
6043 /*
6044             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6045 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6046 * that virtual NMIs remain blocked until the IRET execution is completed.
6047 *
6048 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6049 */
6050 CPUMSetGuestNmiBlocking(pVCpu, true);
6051 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6052 }
6053 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6054 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6055 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6056 {
6057 /*
6058 * Execution of IRET caused an EPT violation, page-modification log-full event or
6059 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6060 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6061 * that virtual NMIs remain blocked until the IRET execution is completed.
6062 *
6063 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6064 */
6065 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6066 {
6067 CPUMSetGuestNmiBlocking(pVCpu, true);
6068 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6069 }
6070 }
6071 }
6072
6073 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6074 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6075 return rcStrict;
6076}
6077
6078
6079#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6080/**
6081 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6082 * guest attempting to execute a VMX instruction.
6083 *
6084 * @returns Strict VBox status code (i.e. informational status codes too).
6085 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6086 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6087 *
6088 * @param pVCpu The cross context virtual CPU structure.
6089 * @param uExitReason The VM-exit reason.
6090 *
6091 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6092 * @remarks No-long-jump zone!!!
6093 */
6094static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6095{
6096 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6097 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6098
6099 /*
6100 * The physical CPU would have already checked the CPU mode/code segment.
6101 * We shall just assert here for paranoia.
6102 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6103 */
6104 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6105 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6106 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6107
6108 if (uExitReason == VMX_EXIT_VMXON)
6109 {
6110 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6111
6112 /*
6113 * We check CR4.VMXE because it is required to be always set while in VMX operation
6114 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6115 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6116 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6117 */
6118 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6119 {
6120 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6121 vmxHCSetPendingXcptUD(pVCpu);
6122 return VINF_HM_PENDING_XCPT;
6123 }
6124 }
6125 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6126 {
6127 /*
6128 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6129 * (other than VMXON), we need to raise a #UD.
6130 */
6131 Log4Func(("Not in VMX root mode -> #UD\n"));
6132 vmxHCSetPendingXcptUD(pVCpu);
6133 return VINF_HM_PENDING_XCPT;
6134 }
6135
6136 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6137 return VINF_SUCCESS;
6138}
6139
6140
6141/**
6142 * Decodes the memory operand of an instruction that caused a VM-exit.
6143 *
6144 * The Exit qualification field provides the displacement field for memory
6145 * operand instructions, if any.
6146 *
6147 * @returns Strict VBox status code (i.e. informational status codes too).
6148 * @retval VINF_SUCCESS if the operand was successfully decoded.
6149 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6150 * operand.
6151 * @param pVCpu The cross context virtual CPU structure.
6152 * @param uExitInstrInfo The VM-exit instruction information field.
6153 * @param enmMemAccess The memory operand's access type (read or write).
6154 * @param GCPtrDisp The instruction displacement field, if any. For
6155 * RIP-relative addressing pass RIP + displacement here.
6156 * @param pGCPtrMem Where to store the effective destination memory address.
6157 *
6158 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6159 *          virtual-8086 mode and hence skips those checks while verifying that the
6160 * segment is valid.
6161 */
6162static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6163 PRTGCPTR pGCPtrMem)
6164{
6165 Assert(pGCPtrMem);
6166 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6167 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6168 | CPUMCTX_EXTRN_CR0);
6169
6170 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6171 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6172 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6173
6174 VMXEXITINSTRINFO ExitInstrInfo;
6175 ExitInstrInfo.u = uExitInstrInfo;
6176 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6177 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6178 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6179 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6180 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6181 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6182 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6183 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6184 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6185
6186 /*
6187 * Validate instruction information.
6188     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6189 */
6190 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6191 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6192 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6193 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6194 AssertLogRelMsgReturn(fIsMemOperand,
6195 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6196
6197 /*
6198 * Compute the complete effective address.
6199 *
6200 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6201 * See AMD spec. 4.5.2 "Segment Registers".
6202 */
6203 RTGCPTR GCPtrMem = GCPtrDisp;
6204 if (fBaseRegValid)
6205 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6206 if (fIdxRegValid)
6207 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6208
6209 RTGCPTR const GCPtrOff = GCPtrMem;
6210 if ( !fIsLongMode
6211 || iSegReg >= X86_SREG_FS)
6212 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6213 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
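    /* Worked example with illustrative values: displacement 0x10, base RAX=0x1000,
       index RBX=0x20 and a scaling field of 2 (i.e. index * 4) give
       0x1000 + (0x20 << 2) + 0x10 = 0x1090 before masking; the segment base is only
       added outside long mode or for FS/GS, per the code above. */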
6214
6215 /*
6216 * Validate effective address.
6217 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6218 */
6219 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6220 Assert(cbAccess > 0);
6221 if (fIsLongMode)
6222 {
6223 if (X86_IS_CANONICAL(GCPtrMem))
6224 {
6225 *pGCPtrMem = GCPtrMem;
6226 return VINF_SUCCESS;
6227 }
6228
6229 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6230 * "Data Limit Checks in 64-bit Mode". */
6231 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6232 vmxHCSetPendingXcptGP(pVCpu, 0);
6233 return VINF_HM_PENDING_XCPT;
6234 }
6235
6236 /*
6237 * This is a watered down version of iemMemApplySegment().
6238 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6239 * and segment CPL/DPL checks are skipped.
6240 */
6241 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6242 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6243 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6244
6245 /* Check if the segment is present and usable. */
6246 if ( pSel->Attr.n.u1Present
6247 && !pSel->Attr.n.u1Unusable)
6248 {
6249 Assert(pSel->Attr.n.u1DescType);
6250 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6251 {
6252 /* Check permissions for the data segment. */
6253 if ( enmMemAccess == VMXMEMACCESS_WRITE
6254 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6255 {
6256 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6257 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6258 return VINF_HM_PENDING_XCPT;
6259 }
6260
6261 /* Check limits if it's a normal data segment. */
6262 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6263 {
6264 if ( GCPtrFirst32 > pSel->u32Limit
6265 || GCPtrLast32 > pSel->u32Limit)
6266 {
6267 Log4Func(("Data segment limit exceeded. "
6268 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6269 GCPtrLast32, pSel->u32Limit));
6270 if (iSegReg == X86_SREG_SS)
6271 vmxHCSetPendingXcptSS(pVCpu, 0);
6272 else
6273 vmxHCSetPendingXcptGP(pVCpu, 0);
6274 return VINF_HM_PENDING_XCPT;
6275 }
6276 }
6277 else
6278 {
6279 /* Check limits if it's an expand-down data segment.
6280 Note! The upper boundary is defined by the B bit, not the G bit! */
6281 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6282 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6283 {
6284 Log4Func(("Expand-down data segment limit exceeded. "
6285 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6286 GCPtrLast32, pSel->u32Limit));
6287 if (iSegReg == X86_SREG_SS)
6288 vmxHCSetPendingXcptSS(pVCpu, 0);
6289 else
6290 vmxHCSetPendingXcptGP(pVCpu, 0);
6291 return VINF_HM_PENDING_XCPT;
6292 }
6293 }
6294 }
6295 else
6296 {
6297 /* Check permissions for the code segment. */
6298 if ( enmMemAccess == VMXMEMACCESS_WRITE
6299 || ( enmMemAccess == VMXMEMACCESS_READ
6300 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6301 {
6302 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6303 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6304 vmxHCSetPendingXcptGP(pVCpu, 0);
6305 return VINF_HM_PENDING_XCPT;
6306 }
6307
6308 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6309 if ( GCPtrFirst32 > pSel->u32Limit
6310 || GCPtrLast32 > pSel->u32Limit)
6311 {
6312 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6313 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6314 if (iSegReg == X86_SREG_SS)
6315 vmxHCSetPendingXcptSS(pVCpu, 0);
6316 else
6317 vmxHCSetPendingXcptGP(pVCpu, 0);
6318 return VINF_HM_PENDING_XCPT;
6319 }
6320 }
6321 }
6322 else
6323 {
6324 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6325 vmxHCSetPendingXcptGP(pVCpu, 0);
6326 return VINF_HM_PENDING_XCPT;
6327 }
6328
6329 *pGCPtrMem = GCPtrMem;
6330 return VINF_SUCCESS;
6331}
6332#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6333
6334
6335/**
6336 * VM-exit helper for LMSW.
6337 */
6338static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6339{
6340 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6341 AssertRCReturn(rc, rc);
6342
6343 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6344 AssertMsg( rcStrict == VINF_SUCCESS
6345 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6346
6347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6348 if (rcStrict == VINF_IEM_RAISED_XCPT)
6349 {
6350 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6351 rcStrict = VINF_SUCCESS;
6352 }
6353
6354 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6355 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6356 return rcStrict;
6357}
6358
6359
6360/**
6361 * VM-exit helper for CLTS.
6362 */
6363static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6364{
6365 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6366 AssertRCReturn(rc, rc);
6367
6368 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6369 AssertMsg( rcStrict == VINF_SUCCESS
6370 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6371
6372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6373 if (rcStrict == VINF_IEM_RAISED_XCPT)
6374 {
6375 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6376 rcStrict = VINF_SUCCESS;
6377 }
6378
6379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6380 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6381 return rcStrict;
6382}
6383
6384
6385/**
6386 * VM-exit helper for MOV from CRx (CRx read).
6387 */
6388static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6389{
6390 Assert(iCrReg < 16);
6391 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6392
6393 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6394 AssertRCReturn(rc, rc);
6395
6396 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6397 AssertMsg( rcStrict == VINF_SUCCESS
6398 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6399
6400 if (iGReg == X86_GREG_xSP)
6401 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6402 else
6403 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6404#ifdef VBOX_WITH_STATISTICS
6405 switch (iCrReg)
6406 {
6407 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6408 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6409 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6410 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6411 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6412 }
6413#endif
6414 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6415 return rcStrict;
6416}
6417
6418
6419/**
6420 * VM-exit helper for MOV to CRx (CRx write).
6421 */
6422static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6423{
6424 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6425
6426 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6427 AssertMsg( rcStrict == VINF_SUCCESS
6428 || rcStrict == VINF_IEM_RAISED_XCPT
6429 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6430
6431 switch (iCrReg)
6432 {
6433 case 0:
6434 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6435 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6436 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6437 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6438 break;
6439
6440 case 2:
6441 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6442            /* Nothing to do here, CR2 is not part of the VMCS. */
6443 break;
6444
6445 case 3:
6446 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6447 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6448 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6449 break;
6450
6451 case 4:
6452 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6453 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6454#ifndef IN_NEM_DARWIN
6455 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6456 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6457#else
6458 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6459#endif
6460 break;
6461
6462 case 8:
6463 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6464 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6465 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6466 break;
6467
6468 default:
6469 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6470 break;
6471 }
6472
6473 if (rcStrict == VINF_IEM_RAISED_XCPT)
6474 {
6475 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6476 rcStrict = VINF_SUCCESS;
6477 }
6478 return rcStrict;
6479}
6480
6481
6482/**
6483 * VM-exit exception handler for \#PF (Page-fault exception).
6484 *
6485 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6486 */
6487static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6488{
6489 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6490 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6491
6492#ifndef IN_NEM_DARWIN
6493 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6494 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6495 { /* likely */ }
6496 else
6497#endif
6498 {
6499#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6500 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6501#endif
6502 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6503 if (!pVmxTransient->fVectoringDoublePF)
6504 {
6505 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6506 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6507 }
6508 else
6509 {
6510 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6511 Assert(!pVmxTransient->fIsNestedGuest);
6512 vmxHCSetPendingXcptDF(pVCpu);
6513 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6514 }
6515 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6516 return VINF_SUCCESS;
6517 }
6518
6519 Assert(!pVmxTransient->fIsNestedGuest);
6520
6521    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6522 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6523 if (pVmxTransient->fVectoringPF)
6524 {
6525 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6526 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6527 }
6528
6529 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6530 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6531 AssertRCReturn(rc, rc);
6532
6533 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6534 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6535
6536 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6537 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6538
6539 Log4Func(("#PF: rc=%Rrc\n", rc));
6540 if (rc == VINF_SUCCESS)
6541 {
6542 /*
6543         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6544 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6545 */
6546 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6547 TRPMResetTrap(pVCpu);
6548 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6549 return rc;
6550 }
6551
6552 if (rc == VINF_EM_RAW_GUEST_TRAP)
6553 {
6554 if (!pVmxTransient->fVectoringDoublePF)
6555 {
6556 /* It's a guest page fault and needs to be reflected to the guest. */
6557 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6558 TRPMResetTrap(pVCpu);
6559 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6560 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6561 uGstErrorCode, pVmxTransient->uExitQual);
6562 }
6563 else
6564 {
6565 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6566 TRPMResetTrap(pVCpu);
6567 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6568 vmxHCSetPendingXcptDF(pVCpu);
6569 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6570 }
6571
6572 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6573 return VINF_SUCCESS;
6574 }
6575
6576 TRPMResetTrap(pVCpu);
6577 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6578 return rc;
6579}
6580
6581
6582/**
6583 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6584 *
6585 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6586 */
6587static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6588{
6589 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6590 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6591
6592 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6593 AssertRCReturn(rc, rc);
6594
6595 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6596 {
6597 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6598 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6599
6600 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6601         *        provides the VM-exit instruction length. If this causes problems later,
6602 * disassemble the instruction like it's done on AMD-V. */
6603 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6604 AssertRCReturn(rc2, rc2);
6605 return rc;
6606 }
6607
6608 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6609 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6610 return VINF_SUCCESS;
6611}
6612
6613
6614/**
6615 * VM-exit exception handler for \#BP (Breakpoint exception).
6616 *
6617 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6618 */
6619static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6620{
6621 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6622 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6623
6624 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6625 AssertRCReturn(rc, rc);
6626
6627 VBOXSTRICTRC rcStrict;
6628 if (!pVmxTransient->fIsNestedGuest)
6629 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6630 else
6631 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6632
6633 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6634 {
6635 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6636 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6637 rcStrict = VINF_SUCCESS;
6638 }
6639
6640 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6641 return rcStrict;
6642}
6643
6644
6645/**
6646 * VM-exit exception handler for \#AC (Alignment-check exception).
6647 *
6648 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6649 */
6650static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6651{
6652 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6653
6654 /*
6655 * Detect #ACs caused by host having enabled split-lock detection.
6656 * Emulate such instructions.
6657 */
6658 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6659 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6660 AssertRCReturn(rc, rc);
6661 /** @todo detect split lock in cpu feature? */
6662 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6663 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6664 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6665 || CPUMGetGuestCPL(pVCpu) != 3
6666 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6667 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6668 {
6669 /*
6670 * Check for debug/trace events and import state accordingly.
6671 */
6672 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6673 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6674 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6675#ifndef IN_NEM_DARWIN
6676 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6677#endif
6678 )
6679 {
6680 if (pVM->cCpus == 1)
6681 {
6682#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6683 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6684#else
6685 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6686#endif
6687 AssertRCReturn(rc, rc);
6688 }
6689 }
6690 else
6691 {
6692 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6693 AssertRCReturn(rc, rc);
6694
6695 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6696
6697 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6698 {
6699 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6700 if (rcStrict != VINF_SUCCESS)
6701 return rcStrict;
6702 }
6703 }
6704
6705 /*
6706 * Emulate the instruction.
6707 *
6708 * We have to ignore the LOCK prefix here as we must not retrigger the
6709 * detection on the host. This isn't all that satisfactory, though...
6710 */
6711 if (pVM->cCpus == 1)
6712 {
6713 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6714 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6715
6716 /** @todo For SMP configs we should do a rendezvous here. */
6717 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6718 if (rcStrict == VINF_SUCCESS)
6719#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6720 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6721 HM_CHANGED_GUEST_RIP
6722 | HM_CHANGED_GUEST_RFLAGS
6723 | HM_CHANGED_GUEST_GPRS_MASK
6724 | HM_CHANGED_GUEST_CS
6725 | HM_CHANGED_GUEST_SS);
6726#else
6727 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6728#endif
6729 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6730 {
6731 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6732 rcStrict = VINF_SUCCESS;
6733 }
6734 return rcStrict;
6735 }
6736 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6737 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6738 return VINF_EM_EMULATE_SPLIT_LOCK;
6739 }
6740
6741 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6742 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6743 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6744
6745 /* Re-inject it. We'll detect any nesting before getting here. */
6746 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6747 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6748 return VINF_SUCCESS;
6749}
6750
6751
6752/**
6753 * VM-exit exception handler for \#DB (Debug exception).
6754 *
6755 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6756 */
6757static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6758{
6759 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6760 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6761
6762 /*
6763     * Get the DR6-like value from the Exit qualification and pass it to DBGF for processing.
6764 */
6765 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6766
6767 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6768 uint64_t const uDR6 = X86_DR6_INIT_VAL
6769 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6770 | X86_DR6_BD | X86_DR6_BS));
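    /* Illustrative example, assuming the architectural DR6 init value 0xffff0ff0: an Exit
       qualification with B0 and BS set yields uDR6 = 0xffff0ff0 | 0x1 | 0x4000 = 0xffff4ff1. */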
6771
6772 int rc;
6773 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6774 if (!pVmxTransient->fIsNestedGuest)
6775 {
6776 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6777
6778 /*
6779 * Prevents stepping twice over the same instruction when the guest is stepping using
6780 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6781 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6782 */
6783 if ( rc == VINF_EM_DBG_STEPPED
6784 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6785 {
6786 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6787 rc = VINF_EM_RAW_GUEST_TRAP;
6788 }
6789 }
6790 else
6791 rc = VINF_EM_RAW_GUEST_TRAP;
6792 Log6Func(("rc=%Rrc\n", rc));
6793 if (rc == VINF_EM_RAW_GUEST_TRAP)
6794 {
6795 /*
6796 * The exception was for the guest. Update DR6, DR7.GD and
6797 * IA32_DEBUGCTL.LBR before forwarding it.
6798 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6799 */
6800#ifndef IN_NEM_DARWIN
6801 VMMRZCallRing3Disable(pVCpu);
6802 HM_DISABLE_PREEMPT(pVCpu);
6803
6804 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6805 pCtx->dr[6] |= uDR6;
6806 if (CPUMIsGuestDebugStateActive(pVCpu))
6807 ASMSetDR6(pCtx->dr[6]);
6808
6809 HM_RESTORE_PREEMPT();
6810 VMMRZCallRing3Enable(pVCpu);
6811#else
6812 /** @todo */
6813#endif
6814
6815 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6816 AssertRCReturn(rc, rc);
6817
6818 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6819 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6820
6821 /* Paranoia. */
6822 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6823 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6824
6825 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6826 AssertRC(rc);
6827
6828 /*
6829 * Raise #DB in the guest.
6830 *
6831 * It is important to reflect exactly what the VM-exit gave us (preserving the
6832 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6833 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6834 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6835 *
6836         * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as
6837         * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6838 */
6839 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6840 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6841 return VINF_SUCCESS;
6842 }
6843
6844 /*
6845 * Not a guest trap, must be a hypervisor related debug event then.
6846 * Update DR6 in case someone is interested in it.
6847 */
6848 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6849 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6850 CPUMSetHyperDR6(pVCpu, uDR6);
6851
6852 return rc;
6853}
6854
6855
6856/**
6857 * Hacks its way around the lovely mesa driver's backdoor accesses.
6858 *
6859 * @sa hmR0SvmHandleMesaDrvGp.
6860 */
6861static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6862{
6863 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6864 RT_NOREF(pCtx);
6865
6866 /* For now we'll just skip the instruction. */
6867 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6868}
6869
6870
6871/**
6872 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6873 * backdoor logging without checking what it is running inside.
6874 *
6875 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6876 * backdoor port and magic numbers loaded in registers.
6877 *
6878 * @returns true if it is, false if it isn't.
6879 * @sa hmR0SvmIsMesaDrvGp.
6880 */
6881DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6882{
6883 /* 0xed: IN eAX,dx */
6884 uint8_t abInstr[1];
6885 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6886 return false;
6887
6888 /* Check that it is #GP(0). */
6889 if (pVmxTransient->uExitIntErrorCode != 0)
6890 return false;
6891
6892 /* Check magic and port. */
6893 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6894 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6895 if (pCtx->rax != UINT32_C(0x564d5868))
6896 return false;
6897 if (pCtx->dx != UINT32_C(0x5658))
6898 return false;
6899
6900 /* Flat ring-3 CS. */
6901 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6902 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6903 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6904 if (pCtx->cs.Attr.n.u2Dpl != 3)
6905 return false;
6906 if (pCtx->cs.u64Base != 0)
6907 return false;
6908
6909 /* Check opcode. */
6910 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6911 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6912 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6913 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6914 if (RT_FAILURE(rc))
6915 return false;
6916 if (abInstr[0] != 0xed)
6917 return false;
6918
6919 return true;
6920}
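/* In plain terms: the sequence recognized above is a one-byte "in eax, dx" (opcode 0xed)
 * executed from a flat ring-3 code segment with the VMware-style backdoor magic in EAX
 * (0x564d5868) and the backdoor I/O port in DX (0x5658). */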
6921
6922
6923/**
6924 * VM-exit exception handler for \#GP (General-protection exception).
6925 *
6926 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6927 */
6928static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6929{
6930 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6931 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6932
6933 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6934 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6935#ifndef IN_NEM_DARWIN
6936 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6937 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6938 { /* likely */ }
6939 else
6940#endif
6941 {
6942#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6943# ifndef IN_NEM_DARWIN
6944 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6945# else
6946 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6947# endif
6948#endif
6949 /*
6950 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6951 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6952 */
6953 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6954 AssertRCReturn(rc, rc);
6955 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6956 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6957
6958 if ( pVmxTransient->fIsNestedGuest
6959 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6960 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6961 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6962 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6963 else
6964 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6965 return rc;
6966 }
6967
6968#ifndef IN_NEM_DARWIN
6969 Assert(CPUMIsGuestInRealModeEx(pCtx));
6970 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6971 Assert(!pVmxTransient->fIsNestedGuest);
6972
6973 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6974 AssertRCReturn(rc, rc);
6975
6976 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6977 if (rcStrict == VINF_SUCCESS)
6978 {
6979 if (!CPUMIsGuestInRealModeEx(pCtx))
6980 {
6981 /*
6982 * The guest is no longer in real-mode, check if we can continue executing the
6983 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6984 */
6985 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6986 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6987 {
6988 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6989 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6990 }
6991 else
6992 {
6993 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6994 rcStrict = VINF_EM_RESCHEDULE;
6995 }
6996 }
6997 else
6998 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6999 }
7000 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7001 {
7002 rcStrict = VINF_SUCCESS;
7003 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7004 }
7005 return VBOXSTRICTRC_VAL(rcStrict);
7006#endif
7007}
7008
7009
7010/**
7011 * VM-exit exception handler for \#DE (Divide Error).
7012 *
7013 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7014 */
7015static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7016{
7017 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7018 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7019
7020 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7021 AssertRCReturn(rc, rc);
7022
7023 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7024 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7025 {
7026 uint8_t cbInstr = 0;
7027 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7028 if (rc2 == VINF_SUCCESS)
7029 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7030 else if (rc2 == VERR_NOT_FOUND)
7031 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7032 else
7033 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7034 }
7035 else
7036 rcStrict = VINF_SUCCESS; /* Do nothing. */
7037
7038 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7039 if (RT_FAILURE(rcStrict))
7040 {
7041 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7042 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7043 rcStrict = VINF_SUCCESS;
7044 }
7045
7046 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7047 return VBOXSTRICTRC_VAL(rcStrict);
7048}
7049
7050
7051/**
7052 * VM-exit exception handler wrapper for all other exceptions that are not handled
7053 * by a specific handler.
7054 *
7055 * This simply re-injects the exception back into the VM without any special
7056 * processing.
7057 *
7058 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7059 */
7060static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7061{
7062 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7063
7064#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7065# ifndef IN_NEM_DARWIN
7066 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7067 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7068 ("uVector=%#x u32XcptBitmap=%#X32\n",
7069 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7070 NOREF(pVmcsInfo);
7071# endif
7072#endif
7073
7074 /*
7075     * Re-inject the exception into the guest. This cannot be a double-fault condition, as that
7076     * would already have been handled while checking exits due to event delivery.
7077 */
7078 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7079
7080#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7081 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
7082 AssertRCReturn(rc, rc);
7083 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7084#endif
7085
7086#ifdef VBOX_WITH_STATISTICS
7087 switch (uVector)
7088 {
7089 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7090 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7091 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7092 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7093 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7094 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7095 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7096 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7097 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7098 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7099 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7100 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7101 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7102 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7103 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7104 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7105 default:
7106 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7107 break;
7108 }
7109#endif
7110
7111    /* We should never call this function for a page-fault; otherwise we'd need to pass on the fault address below. */
7112 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7113 NOREF(uVector);
7114
7115 /* Re-inject the original exception into the guest. */
7116 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7117 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7118 return VINF_SUCCESS;
7119}
7120
7121
7122/**
7123 * VM-exit exception handler for all exceptions (except NMIs!).
7124 *
7125 * @remarks This may be called for both guests and nested-guests. Take care to not
7126 * make assumptions and avoid doing anything that is not relevant when
7127 * executing a nested-guest (e.g., Mesa driver hacks).
7128 */
7129static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7130{
7131 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7132
7133 /*
7134 * If this VM-exit occurred while delivering an event through the guest IDT, take
7135 * action based on the return code and additional hints (e.g. for page-faults)
7136 * that will be updated in the VMX transient structure.
7137 */
7138 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7139 if (rcStrict == VINF_SUCCESS)
7140 {
7141 /*
7142 * If an exception caused a VM-exit due to delivery of an event, the original
7143 * event may have to be re-injected into the guest. We shall reinject it and
7144 * continue guest execution. However, page-fault is a complicated case and
7145 * needs additional processing done in vmxHCExitXcptPF().
7146 */
7147 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7148 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7149 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7150 || uVector == X86_XCPT_PF)
7151 {
7152 switch (uVector)
7153 {
7154 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7155 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7156 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7157 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7158 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7159 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7160 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7161 default:
7162 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7163 }
7164 }
7165 /* else: inject pending event before resuming guest execution. */
7166 }
7167 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7168 {
7169 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7170 rcStrict = VINF_SUCCESS;
7171 }
7172
7173 return rcStrict;
7174}
7175/** @} */
7176
7177
7178/** @name VM-exit handlers.
7179 * @{
7180 */
7181/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7182/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7183/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7184
7185/**
7186 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7187 */
7188HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7189{
7190 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7191 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7192
7193#ifndef IN_NEM_DARWIN
7194 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7195 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7196 return VINF_SUCCESS;
7197 return VINF_EM_RAW_INTERRUPT;
7198#else
7199 return VINF_SUCCESS;
7200#endif
7201}
7202
7203
7204/**
7205 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7206 * VM-exit.
7207 */
7208HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7209{
7210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7211 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7212
7213 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7214
7215 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7216 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7217 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7218
7219 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7220 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7221 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7222 NOREF(pVmcsInfo);
7223
7224 VBOXSTRICTRC rcStrict;
7225 switch (uExitIntType)
7226 {
7227#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7228 /*
7229 * Host physical NMIs:
7230 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7231 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7232 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7233 *
7234 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7235 * See Intel spec. 27.5.5 "Updating Non-Register State".
7236 */
7237 case VMX_EXIT_INT_INFO_TYPE_NMI:
7238 {
7239 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7240 break;
7241 }
7242#endif
7243
7244 /*
7245 * Privileged software exceptions (#DB from ICEBP),
7246 * Software exceptions (#BP and #OF),
7247 * Hardware exceptions:
7248 * Process the required exceptions and resume guest execution if possible.
7249 */
7250 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7251 Assert(uVector == X86_XCPT_DB);
7252 RT_FALL_THRU();
7253 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7254 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7255 RT_FALL_THRU();
7256 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7257 {
7258 NOREF(uVector);
7259 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7260 | HMVMX_READ_EXIT_INSTR_LEN
7261 | HMVMX_READ_IDT_VECTORING_INFO
7262 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7263 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7264 break;
7265 }
7266
7267 default:
7268 {
7269 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7270 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7271 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7272 break;
7273 }
7274 }
7275
7276 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7277 return rcStrict;
7278}
7279
7280
7281/**
7282 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7283 */
7284HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7285{
7286 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7287
7288    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7289 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7290 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7291
7292 /* Evaluate and deliver pending events and resume guest execution. */
7293 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7294 return VINF_SUCCESS;
7295}
7296
7297
7298/**
7299 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7300 */
7301HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7302{
7303 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7304
7305 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7306 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7307 {
7308 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7309 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7310 }
7311
7312 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7313
7314 /*
7315 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7316 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7317 */
7318 uint32_t fIntrState;
7319 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7320 AssertRC(rc);
7321 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7322 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7323 {
7324 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7325 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7326
7327 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7328 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7329 AssertRC(rc);
7330 }
7331
7332    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7333 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7334
7335 /* Evaluate and deliver pending events and resume guest execution. */
7336 return VINF_SUCCESS;
7337}
7338
7339
7340/**
7341 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7342 */
7343HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7344{
7345 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
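    /* We do nothing with WBINVD itself; advancing RIP past the instruction is all that is needed here. */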
7346 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7347}
7348
7349
7350/**
7351 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7352 */
7353HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7354{
7355 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7356 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7357}
7358
7359
7360/**
7361 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7362 */
7363HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7364{
7365 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7366
7367 /*
7368 * Get the state we need and update the exit history entry.
7369 */
7370 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7371 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7372 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7373 AssertRCReturn(rc, rc);
7374
7375 VBOXSTRICTRC rcStrict;
7376 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7377 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7378 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7379 if (!pExitRec)
7380 {
7381 /*
7382 * Regular CPUID instruction execution.
7383 */
7384 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7385 if (rcStrict == VINF_SUCCESS)
7386 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7387 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7388 {
7389 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7390 rcStrict = VINF_SUCCESS;
7391 }
7392 }
7393 else
7394 {
7395 /*
7396 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7397 */
7398 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7399 AssertRCReturn(rc2, rc2);
7400
7401 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7402 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7403
7404 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
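        /* EMHistoryExec may have executed further instructions via IEM, so conservatively flag all guest state as changed. */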
7405 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7406
7407 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7408 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7409 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7410 }
7411 return rcStrict;
7412}
7413
7414
7415/**
7416 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7417 */
7418HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7419{
7420 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7421
7422 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7423 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7424 AssertRCReturn(rc, rc);
7425
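    /* A GETSEC VM-exit can only occur while CR4.SMXE is set; with SMXE clear the CPU raises #UD instead of exiting. */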
7426 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7427 return VINF_EM_RAW_EMULATE_INSTR;
7428
7429 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7430 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7431}
7432
7433
7434/**
7435 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7436 */
7437HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7438{
7439 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7440
7441 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7442 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7443 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7444 AssertRCReturn(rc, rc);
7445
7446 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7447 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7448 {
7449 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7450 we must reset offsetting on VM-entry. See @bugref{6634}. */
7451 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7452 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7453 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7454 }
7455 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7456 {
7457 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7458 rcStrict = VINF_SUCCESS;
7459 }
7460 return rcStrict;
7461}
7462
7463
7464/**
7465 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7466 */
7467HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7468{
7469 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7470
7471 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7472 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7473 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7474 AssertRCReturn(rc, rc);
7475
7476 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7477 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7478 {
7479 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7480 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7481 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7482 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7483 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7484 }
7485 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7486 {
7487 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7488 rcStrict = VINF_SUCCESS;
7489 }
7490 return rcStrict;
7491}
7492
7493
7494/**
7495 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7496 */
7497HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7498{
7499 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7500
7501 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7502 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7503 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7504 AssertRCReturn(rc, rc);
7505
7506 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7507 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7508 if (RT_LIKELY(rc == VINF_SUCCESS))
7509 {
7510 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
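        /* RDPMC is a fixed-length, two-byte instruction (0F 33). */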
7511 Assert(pVmxTransient->cbExitInstr == 2);
7512 }
7513 else
7514 {
7515 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7516 rc = VERR_EM_INTERPRETER;
7517 }
7518 return rc;
7519}
7520
7521
7522/**
7523 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7524 */
7525HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7526{
7527 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7528
7529 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7530 if (EMAreHypercallInstructionsEnabled(pVCpu))
7531 {
7532 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7533 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7534 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7535 AssertRCReturn(rc, rc);
7536
7537 /* Perform the hypercall. */
7538 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7539 if (rcStrict == VINF_SUCCESS)
7540 {
7541 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7542 AssertRCReturn(rc, rc);
7543 }
7544 else
7545 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7546 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7547 || RT_FAILURE(rcStrict));
7548
7549 /* If the hypercall changes anything other than guest's general-purpose registers,
7550 we would need to reload the guest changed bits here before VM-entry. */
7551 }
7552 else
7553 Log4Func(("Hypercalls not enabled\n"));
7554
7555 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7556 if (RT_FAILURE(rcStrict))
7557 {
7558 vmxHCSetPendingXcptUD(pVCpu);
7559 rcStrict = VINF_SUCCESS;
7560 }
7561
7562 return rcStrict;
7563}
7564
7565
7566/**
7567 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7568 */
7569HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7570{
7571 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7572#ifndef IN_NEM_DARWIN
7573 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7574#endif
7575
7576 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7577 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7578 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7579 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7580 AssertRCReturn(rc, rc);
7581
7582 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7583
7584 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7585 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7586 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7587 {
7588 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7589 rcStrict = VINF_SUCCESS;
7590 }
7591 else
7592 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7593 VBOXSTRICTRC_VAL(rcStrict)));
7594 return rcStrict;
7595}
7596
7597
7598/**
7599 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7600 */
7601HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7602{
7603 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7604
7605 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7606 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7607 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7608 AssertRCReturn(rc, rc);
7609
7610 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7611 if (rcStrict == VINF_SUCCESS)
7612 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7613 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7614 {
7615 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7616 rcStrict = VINF_SUCCESS;
7617 }
7618
7619 return rcStrict;
7620}
7621
7622
7623/**
7624 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7625 */
7626HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7627{
7628 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7629
7630 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7631 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7632 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7633 AssertRCReturn(rc, rc);
7634
7635 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7636 if (RT_SUCCESS(rcStrict))
7637 {
7638 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7639 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7640 rcStrict = VINF_SUCCESS;
7641 }
7642
7643 return rcStrict;
7644}
7645
7646
7647/**
7648 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7649 * VM-exit.
7650 */
7651HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7652{
7653 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7654 return VINF_EM_RESET;
7655}
7656
7657
7658/**
7659 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7660 */
7661HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7662{
7663 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7664
7665 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7666 AssertRCReturn(rc, rc);
7667
7668 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7669 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7670 rc = VINF_SUCCESS;
7671 else
7672 rc = VINF_EM_HALT;
7673
7674 if (rc != VINF_SUCCESS)
7675 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7676 return rc;
7677}
7678
7679
7680#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7681/**
7682 * VM-exit handler for instructions that result in a \#UD exception delivered to
7683 * the guest.
7684 */
7685HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7686{
7687 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7688 vmxHCSetPendingXcptUD(pVCpu);
7689 return VINF_SUCCESS;
7690}
7691#endif
7692
7693
7694/**
7695 * VM-exit handler for expiry of the VMX-preemption timer.
7696 */
7697HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7698{
7699 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7700
7701 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7702 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7703    Log12(("vmxHCExitPreemptTimer:\n"));
7704
7705 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7707 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7708 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7709 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7710}
7711
7712
7713/**
7714 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7715 */
7716HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7717{
7718 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7719
7720 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7721 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7722 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7723 AssertRCReturn(rc, rc);
7724
7725 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7726 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7727 : HM_CHANGED_RAISED_XCPT_MASK);
7728
7729#ifndef IN_NEM_DARWIN
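    /* Re-evaluate whether the guest's XCR0 now differs from the host's, so the VM-entry/exit path knows whether XCR0 must be swapped. */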
7730 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7731 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7732 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7733 {
7734 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7735 hmR0VmxUpdateStartVmFunction(pVCpu);
7736 }
7737#endif
7738
7739 return rcStrict;
7740}
7741
7742
7743/**
7744 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7745 */
7746HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7747{
7748 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7749
7750    /** @todo Enable the new code after finding a reliable guest test-case. */
7751#if 1
7752 return VERR_EM_INTERPRETER;
7753#else
7754 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7755 | HMVMX_READ_EXIT_INSTR_INFO
7756 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7757 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7758 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7759 AssertRCReturn(rc, rc);
7760
7761 /* Paranoia. Ensure this has a memory operand. */
7762 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7763
7764 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7765 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7766 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7767 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7768
7769 RTGCPTR GCPtrDesc;
7770 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7771
7772 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7773 GCPtrDesc, uType);
7774 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7775 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7776 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7777 {
7778 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7779 rcStrict = VINF_SUCCESS;
7780 }
7781 return rcStrict;
7782#endif
7783}
7784
7785
7786/**
7787 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7788 * VM-exit.
7789 */
7790HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7791{
7792 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7793 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7794 AssertRCReturn(rc, rc);
7795
7796 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7797 if (RT_FAILURE(rc))
7798 return rc;
7799
7800 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7801 NOREF(uInvalidReason);
7802
7803#ifdef VBOX_STRICT
7804 uint32_t fIntrState;
7805 uint64_t u64Val;
7806 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
7807 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7808 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7809
7810 Log4(("uInvalidReason %u\n", uInvalidReason));
7811 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7812 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7813 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7814
7815 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7816 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7817 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7818 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7819 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7820 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7821 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7822    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7823 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7824 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7825 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7826 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7827# ifndef IN_NEM_DARWIN
7828 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7829 {
7830 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7831 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7832 }
7833
7834 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7835# endif
7836#endif
7837
7838 return VERR_VMX_INVALID_GUEST_STATE;
7839}
7840
7841/**
7842 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7843 */
7844HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7845{
7846 /*
7847 * Cumulative notes of all recognized but unexpected VM-exits.
7848 *
7849 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7850 * nested-paging is used.
7851 *
7852     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7853     *    emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7854     *    this function (and thereby stopping VM execution) for handling such instructions.
7855 *
7856 *
7857 * VMX_EXIT_INIT_SIGNAL:
7858 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7859     *    They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
7860     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7861 *
7862     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7863 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7864 * See Intel spec. "23.8 Restrictions on VMX operation".
7865 *
7866 * VMX_EXIT_SIPI:
7867 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7868 * activity state is used. We don't make use of it as our guests don't have direct
7869 * access to the host local APIC.
7870 *
7871 * See Intel spec. 25.3 "Other Causes of VM-exits".
7872 *
7873 * VMX_EXIT_IO_SMI:
7874 * VMX_EXIT_SMI:
7875 * This can only happen if we support dual-monitor treatment of SMI, which can be
7876 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7877 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7878 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7879 *
7880 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7881 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7882 *
7883 * VMX_EXIT_ERR_MSR_LOAD:
7884     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7885     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
7886     *    execution.
7887 *
7888 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7889 *
7890 * VMX_EXIT_ERR_MACHINE_CHECK:
7891     *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7892     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
7893     *    abort-class #MC exception is raised. We thus cannot assume a
7894 * reasonable chance of continuing any sort of execution and we bail.
7895 *
7896 * See Intel spec. 15.1 "Machine-check Architecture".
7897 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7898 *
7899 * VMX_EXIT_PML_FULL:
7900 * VMX_EXIT_VIRTUALIZED_EOI:
7901 * VMX_EXIT_APIC_WRITE:
7902 * We do not currently support any of these features and thus they are all unexpected
7903 * VM-exits.
7904 *
7905 * VMX_EXIT_GDTR_IDTR_ACCESS:
7906 * VMX_EXIT_LDTR_TR_ACCESS:
7907 * VMX_EXIT_RDRAND:
7908 * VMX_EXIT_RSM:
7909 * VMX_EXIT_VMFUNC:
7910 * VMX_EXIT_ENCLS:
7911 * VMX_EXIT_RDSEED:
7912 * VMX_EXIT_XSAVES:
7913 * VMX_EXIT_XRSTORS:
7914 * VMX_EXIT_UMWAIT:
7915 * VMX_EXIT_TPAUSE:
7916 * VMX_EXIT_LOADIWKEY:
7917 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7918     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
7919 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7920 *
7921 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7922 */
7923 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7924 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7925 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7926}
7927
7928
7929/**
7930 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7931 */
7932HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7933{
7934 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7935
7936 /** @todo Optimize this: We currently drag in the whole MSR state
7937 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7938 * MSRs required. That would require changes to IEM and possibly CPUM too.
7939     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7940 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7941 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7942 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
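    /* The FS and GS base MSRs are not part of the all-MSRs mask above; for those we import the full segment registers as well. */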
7943 switch (idMsr)
7944 {
7945 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7946 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7947 }
7948
7949 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7950 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7951 AssertRCReturn(rc, rc);
7952
7953 Log4Func(("ecx=%#RX32\n", idMsr));
7954
7955#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7956 Assert(!pVmxTransient->fIsNestedGuest);
7957 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7958 {
7959 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7960 && idMsr != MSR_K6_EFER)
7961 {
7962 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7963 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7964 }
7965 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7966 {
7967 Assert(pVmcsInfo->pvMsrBitmap);
7968 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7969 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7970 {
7971 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7972 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7973 }
7974 }
7975 }
7976#endif
7977
7978 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7979 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7980 if (rcStrict == VINF_SUCCESS)
7981 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7982 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7983 {
7984 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7985 rcStrict = VINF_SUCCESS;
7986 }
7987 else
7988 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7989 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7990
7991 return rcStrict;
7992}
7993
7994
7995/**
7996 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7997 */
7998HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7999{
8000 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8001
8002 /** @todo Optimize this: We currently drag in the whole MSR state
8003 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8004 * MSRs required. That would require changes to IEM and possibly CPUM too.
8005     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8006 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8007 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
8008
8009 /*
8010 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8011     * Although we don't need to fetch the base (it will be overwritten shortly), loading
8012     * the guest state would also load the entire segment register, including the limit
8013     * and attributes, and thus we need to import those here.
8014 */
8015 switch (idMsr)
8016 {
8017 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
8018 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
8019 }
8020
8021 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8022 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8023 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
8024 AssertRCReturn(rc, rc);
8025
8026 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8027
8028 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8029 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8030
8031 if (rcStrict == VINF_SUCCESS)
8032 {
8033 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8034
8035 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8036 if ( idMsr == MSR_IA32_APICBASE
8037 || ( idMsr >= MSR_IA32_X2APIC_START
8038 && idMsr <= MSR_IA32_X2APIC_END))
8039 {
8040 /*
8041 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8042 * When full APIC register virtualization is implemented we'll have to make
8043 * sure APIC state is saved from the VMCS before IEM changes it.
8044 */
8045 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8046 }
8047 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8048 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8049 else if (idMsr == MSR_K6_EFER)
8050 {
8051 /*
8052 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8053 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8054 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8055 */
8056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8057 }
8058
8059 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8060 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8061 {
8062 switch (idMsr)
8063 {
8064 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8065 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8066 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8067 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8068 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8069 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8070 default:
8071 {
8072#ifndef IN_NEM_DARWIN
8073 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8074 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8075 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8076 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8077#else
8078 AssertMsgFailed(("TODO\n"));
8079#endif
8080 break;
8081 }
8082 }
8083 }
8084#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8085 else
8086 {
8087 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8088 switch (idMsr)
8089 {
8090 case MSR_IA32_SYSENTER_CS:
8091 case MSR_IA32_SYSENTER_EIP:
8092 case MSR_IA32_SYSENTER_ESP:
8093 case MSR_K8_FS_BASE:
8094 case MSR_K8_GS_BASE:
8095 {
8096 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8097 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8098 }
8099
8100            /* Writes to MSRs in the auto-load/store area or swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
8101 default:
8102 {
8103 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8104 {
8105 /* EFER MSR writes are always intercepted. */
8106 if (idMsr != MSR_K6_EFER)
8107 {
8108 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8109 idMsr));
8110 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8111 }
8112 }
8113
8114 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8115 {
8116 Assert(pVmcsInfo->pvMsrBitmap);
8117 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8118 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8119 {
8120 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8121 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8122 }
8123 }
8124 break;
8125 }
8126 }
8127 }
8128#endif /* VBOX_STRICT */
8129 }
8130 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8131 {
8132 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8133 rcStrict = VINF_SUCCESS;
8134 }
8135 else
8136 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8137 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8138
8139 return rcStrict;
8140}
8141
8142
8143/**
8144 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8145 */
8146HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8147{
8148 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8149
8150 /** @todo The guest has likely hit a contended spinlock. We might want to
8151     *        poke or schedule a different guest VCPU. */
8152 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8153 if (RT_SUCCESS(rc))
8154 return VINF_EM_RAW_INTERRUPT;
8155
8156 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8157 return rc;
8158}
8159
8160
8161/**
8162 * VM-exit handler for when the TPR value is lowered below the specified
8163 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8164 */
8165HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8166{
8167 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8168 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8169
8170 /*
8171 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8172 * We'll re-evaluate pending interrupts and inject them before the next VM
8173 * entry so we can just continue execution here.
8174 */
8175 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8176 return VINF_SUCCESS;
8177}
8178
8179
8180/**
8181 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8182 * VM-exit.
8183 *
8184 * @retval VINF_SUCCESS when guest execution can continue.
8185 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8186 * @retval VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8187 * incompatible guest state for VMX execution (real-on-v86 case).
8188 */
8189HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8190{
8191 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8192 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8193
8194 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8195 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8196 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8197
8198 VBOXSTRICTRC rcStrict;
8199 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8200 uint64_t const uExitQual = pVmxTransient->uExitQual;
8201 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8202 switch (uAccessType)
8203 {
8204 /*
8205 * MOV to CRx.
8206 */
8207 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8208 {
8209 /*
8210 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8211 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8212 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8213 * PAE PDPTEs as well.
8214 */
8215 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8216 AssertRCReturn(rc, rc);
8217
8218 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8219#ifndef IN_NEM_DARWIN
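            /* Remember the current CR0 so the real-mode switch kludge further down can detect CR0.PE being cleared. */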
8220 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8221#endif
8222 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8223 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8224
8225 /*
8226             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8227 * - When nested paging isn't used.
8228 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8229 * - We are executing in the VM debug loop.
8230 */
8231#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8232# ifndef IN_NEM_DARWIN
8233 Assert( iCrReg != 3
8234 || !VM_IS_VMX_NESTED_PAGING(pVM)
8235 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8236 || pVCpu->hmr0.s.fUsingDebugLoop);
8237# else
8238 Assert( iCrReg != 3
8239 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8240# endif
8241#endif
8242
8243 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8244 Assert( iCrReg != 8
8245 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8246
8247 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8248 AssertMsg( rcStrict == VINF_SUCCESS
8249 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8250
8251#ifndef IN_NEM_DARWIN
8252 /*
8253 * This is a kludge for handling switches back to real mode when we try to use
8254 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8255 * deal with special selector values, so we have to return to ring-3 and run
8256 * there till the selector values are V86 mode compatible.
8257 *
8258 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8259 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8260 * this function.
8261 */
8262 if ( iCrReg == 0
8263 && rcStrict == VINF_SUCCESS
8264 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8265 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8266 && (uOldCr0 & X86_CR0_PE)
8267 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8268 {
8269 /** @todo Check selectors rather than returning all the time. */
8270 Assert(!pVmxTransient->fIsNestedGuest);
8271 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8272 rcStrict = VINF_EM_RESCHEDULE_REM;
8273 }
8274#endif
8275
8276 break;
8277 }
8278
8279 /*
8280 * MOV from CRx.
8281 */
8282 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8283 {
8284 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8285 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8286
8287 /*
8288             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8289 * - When nested paging isn't used.
8290 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8291 * - We are executing in the VM debug loop.
8292 */
8293#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8294# ifndef IN_NEM_DARWIN
8295 Assert( iCrReg != 3
8296 || !VM_IS_VMX_NESTED_PAGING(pVM)
8297 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8298 || pVCpu->hmr0.s.fLeaveDone);
8299# else
8300 Assert( iCrReg != 3
8301 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8302# endif
8303#endif
8304
8305 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8306 Assert( iCrReg != 8
8307 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8308
8309 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8310 break;
8311 }
8312
8313 /*
8314 * CLTS (Clear Task-Switch Flag in CR0).
8315 */
8316 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8317 {
8318 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8319 break;
8320 }
8321
8322 /*
8323 * LMSW (Load Machine-Status Word into CR0).
8324 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8325 */
8326 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8327 {
8328 RTGCPTR GCPtrEffDst;
8329 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8330 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8331 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8332 if (fMemOperand)
8333 {
8334 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8335 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8336 }
8337 else
8338 GCPtrEffDst = NIL_RTGCPTR;
8339 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8340 break;
8341 }
8342
8343 default:
8344 {
8345 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8346 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8347 }
8348 }
8349
8350 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8351 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8352 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8353
8354 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8355 NOREF(pVM);
8356 return rcStrict;
8357}
8358
8359
8360/**
8361 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8362 * VM-exit.
8363 */
8364HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8365{
8366 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8367 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8368
8369 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8370 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8371 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8372 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8373 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8374 | CPUMCTX_EXTRN_EFER);
8375 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8376 AssertRCReturn(rc, rc);
8377
8378    /* See Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8379 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8380 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8381 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8382 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8383 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8384 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
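    /* The exit-qualification size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes; 2 is not a valid encoding (see s_aIOSizes below). */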
8385 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8386
8387 /*
8388 * Update exit history to see if this exit can be optimized.
8389 */
8390 VBOXSTRICTRC rcStrict;
8391 PCEMEXITREC pExitRec = NULL;
8392 if ( !fGstStepping
8393 && !fDbgStepping)
8394 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8395 !fIOString
8396 ? !fIOWrite
8397 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8398 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8399 : !fIOWrite
8400 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8401 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8402 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8403 if (!pExitRec)
8404 {
8405 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8406 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8407
8408 uint32_t const cbValue = s_aIOSizes[uIOSize];
8409 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8410 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8411 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8412 if (fIOString)
8413 {
8414 /*
8415 * INS/OUTS - I/O String instruction.
8416 *
8417 * Use instruction-information if available, otherwise fall back on
8418 * interpreting the instruction.
8419 */
8420 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8421 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
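            /* IA32_VMX_BASIC bit 54 reports whether the CPU provides instruction-information for INS/OUTS VM-exits. */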
8422 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8423 if (fInsOutsInfo)
8424 {
8425 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8426 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8427 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8428 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8429 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8430 if (fIOWrite)
8431 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8432 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8433 else
8434 {
8435 /*
8436 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8437 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8438 * See Intel Instruction spec. for "INS".
8439 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8440 */
8441 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8442 }
8443 }
8444 else
8445 rcStrict = IEMExecOne(pVCpu);
8446
8447 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8448 fUpdateRipAlready = true;
8449 }
8450 else
8451 {
8452 /*
8453 * IN/OUT - I/O instruction.
8454 */
8455 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8456 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8457 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8458 if (fIOWrite)
8459 {
8460 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8461 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8462#ifndef IN_NEM_DARWIN
8463 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8464 && !pCtx->eflags.Bits.u1TF)
8465 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8466#endif
8467 }
8468 else
8469 {
8470 uint32_t u32Result = 0;
8471 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8472 if (IOM_SUCCESS(rcStrict))
8473 {
8474 /* Save result of I/O IN instr. in AL/AX/EAX. */
8475 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8476 }
8477#ifndef IN_NEM_DARWIN
8478 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8479 && !pCtx->eflags.Bits.u1TF)
8480 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8481#endif
8482 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8483 }
8484 }
8485
8486 if (IOM_SUCCESS(rcStrict))
8487 {
8488 if (!fUpdateRipAlready)
8489 {
8490 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8491 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8492 }
8493
8494 /*
8495             * INS/OUTS with a REP prefix updates RFLAGS; failing to sync it was observed as a
8496             * triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8497 *
8498 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8499 */
8500 if (fIOString)
8501 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8502
8503 /*
8504 * If any I/O breakpoints are armed, we need to check if one triggered
8505 * and take appropriate action.
8506 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8507 */
8508 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8509 AssertRCReturn(rc, rc);
8510
8511 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8512 * execution engines about whether hyper BPs and such are pending. */
8513 uint32_t const uDr7 = pCtx->dr[7];
8514 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8515 && X86_DR7_ANY_RW_IO(uDr7)
8516 && (pCtx->cr4 & X86_CR4_DE))
8517 || DBGFBpIsHwIoArmed(pVM)))
8518 {
8519 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8520
8521#ifndef IN_NEM_DARWIN
8522 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8523 VMMRZCallRing3Disable(pVCpu);
8524 HM_DISABLE_PREEMPT(pVCpu);
8525
8526 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8527
8528 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8529 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8530 {
8531 /* Raise #DB. */
8532 if (fIsGuestDbgActive)
8533 ASMSetDR6(pCtx->dr[6]);
8534 if (pCtx->dr[7] != uDr7)
8535 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8536
8537 vmxHCSetPendingXcptDB(pVCpu);
8538 }
8539 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8540 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8541 else if ( rcStrict2 != VINF_SUCCESS
8542 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8543 rcStrict = rcStrict2;
8544 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
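 /* Illustrative example: if the port access above returned VINF_SUCCESS but DBGFBpCheckIo
    returned an EM status such as VINF_EM_DBG_BREAKPOINT, that status is taken over; when both
    statuses are informational, the numerically lower (higher priority) one wins. */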
8545
8546 HM_RESTORE_PREEMPT();
8547 VMMRZCallRing3Enable(pVCpu);
8548#else
8549 /** @todo */
8550#endif
8551 }
8552 }
8553
8554#ifdef VBOX_STRICT
8555 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8556 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8557 Assert(!fIOWrite);
8558 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8559 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8560 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8561 Assert(fIOWrite);
8562 else
8563 {
8564# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8565 * statuses, that the VMM device and some others may return. See
8566 * IOM_SUCCESS() for guidance. */
8567 AssertMsg( RT_FAILURE(rcStrict)
8568 || rcStrict == VINF_SUCCESS
8569 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8570 || rcStrict == VINF_EM_DBG_BREAKPOINT
8571 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8572 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8573# endif
8574 }
8575#endif
8576 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8577 }
8578 else
8579 {
8580 /*
8581 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8582 */
8583 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8584 AssertRCReturn(rc2, rc2);
8585 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8586 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8587 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8588 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8589 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8590 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8591
8592 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8593 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8594
8595 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8596 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8597 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8598 }
8599 return rcStrict;
8600}
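
#if 0
/* Illustrative sketch of the IN-result merge performed above (hypothetical helper, not part of
   the handler and never called): the size-based AND mask decides how much of EAX is replaced. */
static uint32_t vmxSketchMergeIoInResult(uint32_t uOldEax, uint32_t u32Result, uint32_t uAndVal)
{
    /* Keep the untouched high bits of EAX and insert the masked read result;
       e.g. a 1-byte IN (uAndVal = 0xff) only replaces AL. */
    return (uOldEax & ~uAndVal) | (u32Result & uAndVal);
}
#endif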
8601
8602
8603/**
8604 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8605 * VM-exit.
8606 */
8607HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8608{
8609 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8610
8611 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8612 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8613 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8614 {
8615 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8616 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8617 {
8618 uint32_t uErrCode;
8619 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8620 {
8621 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8622 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8623 }
8624 else
8625 uErrCode = 0;
8626
8627 RTGCUINTPTR GCPtrFaultAddress;
8628 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8629 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8630 else
8631 GCPtrFaultAddress = 0;
8632
8633 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8634
8635 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8636 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8637
8638 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8639 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8640 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8641 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8642 }
8643 }
8644
8645 /* Fall back to the interpreter to emulate the task-switch. */
8646 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8647 return VERR_EM_INTERPRETER;
8648}
8649
8650
8651/**
8652 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8653 */
8654HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8655{
8656 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8657
8658 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8659 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8660 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8661 AssertRC(rc);
8662 return VINF_EM_DBG_STEPPED;
8663}
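
#if 0
/* Illustrative sketch (hypothetical helper, never called): re-arming the monitor trap flag for
   another single step would mirror the handler above, setting the processor-based control
   instead of clearing it before the next VM-entry. */
static void vmxSketchRearmMonitorTrapFlag(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
    int const rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    AssertRC(rc);
}
#endif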
8664
8665
8666/**
8667 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8668 */
8669HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8670{
8671 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8672 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8673
8674 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8675 | HMVMX_READ_EXIT_INSTR_LEN
8676 | HMVMX_READ_EXIT_INTERRUPTION_INFO
8677 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
8678 | HMVMX_READ_IDT_VECTORING_INFO
8679 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8680
8681 /*
8682 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8683 */
8684 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8685 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8686 {
8687 /* For some crazy guests, if event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8688 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8689 {
8690 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8691 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8692 }
8693 }
8694 else
8695 {
8696 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8697 return rcStrict;
8698 }
8699
8700 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8701 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8702 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8703 AssertRCReturn(rc, rc);
8704
8705 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8706 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8707 switch (uAccessType)
8708 {
8709#ifndef IN_NEM_DARWIN
8710 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8711 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8712 {
8713 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8714 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8715 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8716
8717 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8718 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8719 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8720 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8721 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
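 /* Illustrative example: with the APIC base at its default 0xfee00000 and an access at page
    offset 0x300 (ICR low), GCPhys above resolves to 0xfee00300. */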
8722
8723 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8724 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8725 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8726 if ( rcStrict == VINF_SUCCESS
8727 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8728 || rcStrict == VERR_PAGE_NOT_PRESENT)
8729 {
8730 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8731 | HM_CHANGED_GUEST_APIC_TPR);
8732 rcStrict = VINF_SUCCESS;
8733 }
8734 break;
8735 }
8736#else
8737 /** @todo */
8738#endif
8739
8740 default:
8741 {
8742 Log4Func(("uAccessType=%#x\n", uAccessType));
8743 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8744 break;
8745 }
8746 }
8747
8748 if (rcStrict != VINF_SUCCESS)
8749 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8750 return rcStrict;
8751}
8752
8753
8754/**
8755 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8756 * VM-exit.
8757 */
8758HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8759{
8760 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8761 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8762
8763 /*
8764 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8765 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8766 * must emulate the MOV DRx access.
8767 */
8768 if (!pVmxTransient->fIsNestedGuest)
8769 {
8770 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8771 if (pVmxTransient->fWasGuestDebugStateActive)
8772 {
8773 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8774 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8775 }
8776
8777 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8778 && !pVmxTransient->fWasHyperDebugStateActive)
8779 {
8780 Assert(!DBGFIsStepping(pVCpu));
8781 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8782
8783 /* Don't intercept MOV DRx any more. */
8784 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8785 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8786 AssertRC(rc);
8787
8788#ifndef IN_NEM_DARWIN
8789 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8790 VMMRZCallRing3Disable(pVCpu);
8791 HM_DISABLE_PREEMPT(pVCpu);
8792
8793 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8794 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8795 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8796
8797 HM_RESTORE_PREEMPT();
8798 VMMRZCallRing3Enable(pVCpu);
8799#else
8800 CPUMR3NemActivateGuestDebugState(pVCpu);
8801 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8802 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8803#endif
8804
8805#ifdef VBOX_WITH_STATISTICS
8806 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8807 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8808 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8809 else
8810 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8811#endif
8812 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8813 return VINF_SUCCESS;
8814 }
8815 }
8816
8817 /*
8818 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
8819 * The EFER MSR is always up-to-date.
8820 * Update the segment registers and DR7 from the CPU.
8821 */
8822 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8823 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8824 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8825 AssertRCReturn(rc, rc);
8826 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
8827
8828 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8829 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8830 {
8831 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8832 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8833 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8834 if (RT_SUCCESS(rc))
8835 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8836 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8837 }
8838 else
8839 {
8840 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8841 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8842 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8843 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8844 }
8845
8846 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8847 if (RT_SUCCESS(rc))
8848 {
8849 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8850 AssertRCReturn(rc2, rc2);
8851 return VINF_SUCCESS;
8852 }
8853 return rc;
8854}
8855
8856
8857/**
8858 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8859 * Conditional VM-exit.
8860 */
8861HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8862{
8863 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8864
8865#ifndef IN_NEM_DARWIN
8866 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8867
8868 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
8869 | HMVMX_READ_EXIT_INTERRUPTION_INFO
8870 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
8871 | HMVMX_READ_IDT_VECTORING_INFO
8872 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
8873 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
8874
8875 /*
8876 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8877 */
8878 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8879 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8880 {
8881 /*
8882 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8883 * instruction emulation to inject the original event. Otherwise, injecting the original event
8884 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8885 */
8886 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8887 { /* likely */ }
8888 else
8889 {
8890 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8891# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8892 /** @todo NSTVMX: Think about how this should be handled. */
8893 if (pVmxTransient->fIsNestedGuest)
8894 return VERR_VMX_IPE_3;
8895# endif
8896 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8897 }
8898 }
8899 else
8900 {
8901 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8902 return rcStrict;
8903 }
8904
8905 /*
8906 * Get sufficient state and update the exit history entry.
8907 */
8908 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8909 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8910 AssertRCReturn(rc, rc);
8911
8912 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8913 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8914 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8915 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8916 if (!pExitRec)
8917 {
8918 /*
8919 * If we succeed, resume guest execution.
8920 * If we fail to interpret the instruction because we couldn't get the guest physical address
8921 * of the page containing the instruction via the guest's page tables (we would invalidate the
8922 * guest page in the host TLB), resume execution, which causes a guest page fault and lets the
8923 * guest handle this weird case. See @bugref{6043}.
8924 */
8925 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8926 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8927/** @todo bird: We can probably just go straight to IOM here and assume that
8928 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8929 * well. However, we need to address that aliasing workarounds that
8930 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8931 *
8932 * Might also be interesting to see if we can get this done more or
8933 * less locklessly inside IOM. Need to consider the lookup table
8934 * updating and use a bit more carefully first (or do all updates via
8935 * rendezvous) */
8936 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8937 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8938 if ( rcStrict == VINF_SUCCESS
8939 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8940 || rcStrict == VERR_PAGE_NOT_PRESENT)
8941 {
8942 /* Successfully handled MMIO operation. */
8943 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8944 | HM_CHANGED_GUEST_APIC_TPR);
8945 rcStrict = VINF_SUCCESS;
8946 }
8947 }
8948 else
8949 {
8950 /*
8951 * Frequent exit or something needing probing. Call EMHistoryExec.
8952 */
8953 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8954 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8955
8956 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8957 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8958
8959 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8960 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8961 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8962 }
8963 return rcStrict;
8964#else
8965 AssertFailed();
8966 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8967#endif
8968}
8969
8970
8971/**
8972 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8973 * VM-exit.
8974 */
8975HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8976{
8977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8978#ifndef IN_NEM_DARWIN
8979 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8980
8981 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8982 | HMVMX_READ_EXIT_INSTR_LEN
8983 | HMVMX_READ_EXIT_INTERRUPTION_INFO
8984 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
8985 | HMVMX_READ_IDT_VECTORING_INFO
8986 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
8987 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
8988
8989 /*
8990 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8991 */
8992 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8993 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8994 {
8995 /*
8996 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8997 * we shall resolve the nested #PF and re-inject the original event.
8998 */
8999 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9000 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9001 }
9002 else
9003 {
9004 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9005 return rcStrict;
9006 }
9007
9008 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9009 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9010 AssertRCReturn(rc, rc);
9011
9012 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9013 uint64_t const uExitQual = pVmxTransient->uExitQual;
9014 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9015
9016 RTGCUINT uErrorCode = 0;
9017 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9018 uErrorCode |= X86_TRAP_PF_ID;
9019 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9020 uErrorCode |= X86_TRAP_PF_RW;
9021 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9022 uErrorCode |= X86_TRAP_PF_P;
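 /* Illustrative example: a guest write hitting a present but read-only EPT mapping yields
    uErrorCode = X86_TRAP_PF_RW | X86_TRAP_PF_P, while an instruction fetch from a non-present
    mapping yields just X86_TRAP_PF_ID. */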
9023
9024 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9025 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9026
9027 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9028
9029 /*
9030 * Handle the pagefault trap for the nested shadow table.
9031 */
9032 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9033 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
9034 TRPMResetTrap(pVCpu);
9035
9036 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9037 if ( rcStrict == VINF_SUCCESS
9038 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9039 || rcStrict == VERR_PAGE_NOT_PRESENT)
9040 {
9041 /* Successfully synced our nested page tables. */
9042 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9043 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9044 return VINF_SUCCESS;
9045 }
9046 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9047 return rcStrict;
9048
9049#else /* IN_NEM_DARWIN */
9050 PVM pVM = pVCpu->CTX_SUFF(pVM);
9051 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9052 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9053 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9054 vmxHCImportGuestRip(pVCpu);
9055 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
9056
9057 /*
9058 * Ask PGM for information about the given GCPhys. We need to check if we're
9059 * out of sync first.
9060 */
9061 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
9062 PGMPHYSNEMPAGEINFO Info;
9063 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9064 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9065 if (RT_SUCCESS(rc))
9066 {
9067 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9068 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9069 {
9070 if (State.fCanResume)
9071 {
9072 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9073 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9074 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9075 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9076 State.fDidSomething ? "" : " no-change"));
9077 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9078 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9079 return VINF_SUCCESS;
9080 }
9081 }
9082
9083 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9084 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9085 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9086 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9087 State.fDidSomething ? "" : " no-change"));
9088 }
9089 else
9090 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9091 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9092 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9093
9094 /*
9095 * Emulate the memory access, either access handler or special memory.
9096 */
9097 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9098 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9099 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9100 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9101 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9102
9103 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9104 AssertRCReturn(rc, rc);
9105
9106 VBOXSTRICTRC rcStrict;
9107 if (!pExitRec)
9108 rcStrict = IEMExecOne(pVCpu);
9109 else
9110 {
9111 /* Frequent access or probing. */
9112 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9113 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9114 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9115 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9116 }
9117
9118 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9119
9120 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9121 return rcStrict;
9122#endif /* IN_NEM_DARWIN */
9123}
9124
9125#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9126
9127/**
9128 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9129 */
9130HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9131{
9132 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9133
9134 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9135 | HMVMX_READ_EXIT_INSTR_INFO
9136 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9137 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9138 | CPUMCTX_EXTRN_HWVIRT
9139 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9140 AssertRCReturn(rc, rc);
9141
9142 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9143
9144 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9145 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9146
9147 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9148 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9149 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9150 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9151 {
9152 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9153 rcStrict = VINF_SUCCESS;
9154 }
9155 return rcStrict;
9156}
9157
9158
9159/**
9160 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9161 */
9162HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9163{
9164 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9165
9166 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9167 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9168 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9169 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9170 AssertRCReturn(rc, rc);
9171
9172 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9173
9174 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9175 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9176 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9177 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9178 {
9179 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9180 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9181 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9182 }
9183 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9184 return rcStrict;
9185}
9186
9187
9188/**
9189 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9190 */
9191HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9192{
9193 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9194
9195 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9196 | HMVMX_READ_EXIT_INSTR_INFO
9197 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9198 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9199 | CPUMCTX_EXTRN_HWVIRT
9200 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9201 AssertRCReturn(rc, rc);
9202
9203 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9204
9205 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9206 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9207
9208 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9209 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9210 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9211 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9212 {
9213 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9214 rcStrict = VINF_SUCCESS;
9215 }
9216 return rcStrict;
9217}
9218
9219
9220/**
9221 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9222 */
9223HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9224{
9225 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9226
9227 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9228 | HMVMX_READ_EXIT_INSTR_INFO
9229 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9230 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9231 | CPUMCTX_EXTRN_HWVIRT
9232 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9233 AssertRCReturn(rc, rc);
9234
9235 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9236
9237 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9238 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9239
9240 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9241 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9243 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9244 {
9245 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9246 rcStrict = VINF_SUCCESS;
9247 }
9248 return rcStrict;
9249}
9250
9251
9252/**
9253 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9254 */
9255HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9256{
9257 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9258
9259 /*
9260 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9261 * thus might not need to import the shadow VMCS state, but it's safer to do so in
9262 * case code elsewhere dares to look at unsynced VMCS fields.
9263 */
9264 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9265 | HMVMX_READ_EXIT_INSTR_INFO
9266 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9267 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9268 | CPUMCTX_EXTRN_HWVIRT
9269 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9270 AssertRCReturn(rc, rc);
9271
9272 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9273
9274 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9275 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9276 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9277
9278 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9280 {
9281 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9282
9283# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9284 /* Try for exit optimization. This is on the following instruction
9285 because it would be a waste of time to have to reinterpret the
9286 already decoded vmwrite instruction. */
9287 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9288 if (pExitRec)
9289 {
9290 /* Frequent access or probing. */
9291 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9292 AssertRCReturn(rc, rc);
9293
9294 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9295 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9296 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9297 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9298 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9299 }
9300# endif
9301 }
9302 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9303 {
9304 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9305 rcStrict = VINF_SUCCESS;
9306 }
9307 return rcStrict;
9308}
9309
9310
9311/**
9312 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9313 */
9314HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9315{
9316 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9317
9318 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9319 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9320 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9321 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9322 AssertRCReturn(rc, rc);
9323
9324 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9325
9326 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9327 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9328 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9329 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9330 {
9331 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9332 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9333 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9334 }
9335 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9336 return rcStrict;
9337}
9338
9339
9340/**
9341 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9342 */
9343HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9344{
9345 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9346
9347 /*
9348 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9349 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9350 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9351 */
9352 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9353 | HMVMX_READ_EXIT_INSTR_INFO
9354 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9355 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9356 | CPUMCTX_EXTRN_HWVIRT
9357 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9358 AssertRCReturn(rc, rc);
9359
9360 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9361
9362 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9363 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9364 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9365
9366 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9367 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9368 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9369 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9370 {
9371 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9372 rcStrict = VINF_SUCCESS;
9373 }
9374 return rcStrict;
9375}
9376
9377
9378/**
9379 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9380 */
9381HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9382{
9383 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9384
9385 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9386 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9387 | CPUMCTX_EXTRN_HWVIRT
9388 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9389 AssertRCReturn(rc, rc);
9390
9391 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9392
9393 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9394 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9395 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9396 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9397 {
9398 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9399 rcStrict = VINF_SUCCESS;
9400 }
9401 return rcStrict;
9402}
9403
9404
9405/**
9406 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9407 */
9408HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9409{
9410 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9411
9412 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9413 | HMVMX_READ_EXIT_INSTR_INFO
9414 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9415 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9416 | CPUMCTX_EXTRN_HWVIRT
9417 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9418 AssertRCReturn(rc, rc);
9419
9420 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9421
9422 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9423 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9424
9425 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9426 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9427 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9428 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9429 {
9430 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9431 rcStrict = VINF_SUCCESS;
9432 }
9433 return rcStrict;
9434}
9435
9436
9437/**
9438 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9439 */
9440HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9441{
9442 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9443
9444 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9445 | HMVMX_READ_EXIT_INSTR_INFO
9446 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9447 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9448 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9449 AssertRCReturn(rc, rc);
9450
9451 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9452
9453 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9454 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9455
9456 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9457 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9458 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9459 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9460 {
9461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9462 rcStrict = VINF_SUCCESS;
9463 }
9464 return rcStrict;
9465}
9466
9467
9468# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9469/**
9470 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9471 */
9472HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9473{
9474 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9475
9476 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9477 | HMVMX_READ_EXIT_INSTR_INFO
9478 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9479 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9480 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9481 AssertRCReturn(rc, rc);
9482
9483 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9484
9485 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9486 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9487
9488 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9489 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9490 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9491 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9492 {
9493 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9494 rcStrict = VINF_SUCCESS;
9495 }
9496 return rcStrict;
9497}
9498# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9499#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9500/** @} */
9501
9502
9503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9504/** @name Nested-guest VM-exit handlers.
9505 * @{
9506 */
9507/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9508/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9509/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9510
9511/**
9512 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9513 * Conditional VM-exit.
9514 */
9515HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9516{
9517 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9518
9519 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9520
9521 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9522 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9523 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9524
9525 switch (uExitIntType)
9526 {
9527# ifndef IN_NEM_DARWIN
9528 /*
9529 * Physical NMIs:
9530 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9531 */
9532 case VMX_EXIT_INT_INFO_TYPE_NMI:
9533 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9534# endif
9535
9536 /*
9537 * Hardware exceptions,
9538 * Software exceptions,
9539 * Privileged software exceptions:
9540 * Figure out if the exception must be delivered to the guest or the nested-guest.
9541 */
9542 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9543 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9544 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9545 {
9546 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9547 | HMVMX_READ_EXIT_INSTR_LEN
9548 | HMVMX_READ_IDT_VECTORING_INFO
9549 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9550
9551 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9552 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9553 {
9554 /* Exit qualification is required for debug and page-fault exceptions. */
9555 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9556
9557 /*
9558 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9559 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9560 * length. However, if delivery of a software interrupt, software exception or privileged
9561 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9562 */
9563 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9564 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9565 pVmxTransient->uExitIntErrorCode,
9566 pVmxTransient->uIdtVectoringInfo,
9567 pVmxTransient->uIdtVectoringErrorCode);
9568#ifdef DEBUG_ramshankar
9569 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9570 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9571 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9572 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9573 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9574 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9575#endif
9576 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9577 }
9578
9579 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9580 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9581 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9582 }
9583
9584 /*
9585 * Software interrupts:
9586 * VM-exits cannot be caused by software interrupts.
9587 *
9588 * External interrupts:
9589 * This should only happen when "acknowledge external interrupts on VM-exit"
9590 * control is set. However, we never set this when executing a guest or
9591 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9592 * the guest.
9593 */
9594 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9595 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9596 default:
9597 {
9598 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9599 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9600 }
9601 }
9602}
9603
9604
9605/**
9606 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9607 * Unconditional VM-exit.
9608 */
9609HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9610{
9611 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9612 return IEMExecVmxVmexitTripleFault(pVCpu);
9613}
9614
9615
9616/**
9617 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9618 */
9619HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9620{
9621 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9622
9623 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9624 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9625 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9626}
9627
9628
9629/**
9630 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9631 */
9632HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9633{
9634 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9635
9636 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9637 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9638 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9639}
9640
9641
9642/**
9643 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9644 * Unconditional VM-exit.
9645 */
9646HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9647{
9648 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9649
9650 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9651 | HMVMX_READ_EXIT_INSTR_LEN
9652 | HMVMX_READ_IDT_VECTORING_INFO
9653 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9654
9655 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9656 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
9657 pVmxTransient->uIdtVectoringErrorCode);
9658 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9659}
9660
9661
9662/**
9663 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9664 */
9665HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9666{
9667 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9668
9669 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9670 {
9671 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9672 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9673 }
9674 return vmxHCExitHlt(pVCpu, pVmxTransient);
9675}
9676
9677
9678/**
9679 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9680 */
9681HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9682{
9683 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9684
9685 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9686 {
9687 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9688 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9689 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9690 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9691 }
9692 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9693}
9694
9695
9696/**
9697 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9698 */
9699HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9700{
9701 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9702
9703 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9704 {
9705 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9706 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9707 }
9708 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9709}
9710
9711
9712/**
9713 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9714 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9715 */
9716HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9717{
9718 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9719
9720 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9721 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9722
9723 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
9724
9725 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9726 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9727 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9728
9729 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9730 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9731 u64VmcsField &= UINT64_C(0xffffffff);
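 /* Illustrative example: outside long mode only the low 32 bits of the register operand select
    the VMCS field, so a stale upper half in the stored GPR value (e.g. 0xdeadbeef00004402)
    still encodes field 0x4402. */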
9732
9733 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9734 {
9735 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9736 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9737 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9738 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9739 }
9740
9741 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9742 return vmxHCExitVmread(pVCpu, pVmxTransient);
9743 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9744}
9745
9746
9747/**
9748 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9749 */
9750HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9751{
9752 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9753
9754 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9755 {
9756 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9757 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9758 }
9759
9760 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9761}
9762
9763
9764/**
9765 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9766 * Conditional VM-exit.
9767 */
9768HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9769{
9770 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9771
9772 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9773 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9774
9775 VBOXSTRICTRC rcStrict;
9776 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9777 switch (uAccessType)
9778 {
9779 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9780 {
9781 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9782 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9783 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9784 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9785
9786 bool fIntercept;
9787 switch (iCrReg)
9788 {
9789 case 0:
9790 case 4:
9791 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9792 break;
9793
9794 case 3:
9795 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9796 break;
9797
9798 case 8:
9799 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9800 break;
9801
9802 default:
9803 fIntercept = false;
9804 break;
9805 }
9806 if (fIntercept)
9807 {
9808 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9809 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9810 }
9811 else
9812 {
9813 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9814 AssertRCReturn(rc, rc);
9815 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9816 }
9817 break;
9818 }
9819
9820 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9821 {
9822 /*
9823 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9824 * CR2 reads do not cause a VM-exit.
9825 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9826 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9827 */
9828 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9829 if ( iCrReg == 3
9830 || iCrReg == 8)
9831 {
9832 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9833 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9834 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9835 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9836 {
9837 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9838 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9839 }
9840 else
9841 {
9842 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9843 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9844 }
9845 }
9846 else
9847 {
9848 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9849 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9850 }
9851 break;
9852 }
9853
9854 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9855 {
9856 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9857 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9858 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
9859 if ( (uGstHostMask & X86_CR0_TS)
9860 && (uReadShadow & X86_CR0_TS))
9861 {
9862 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9863 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9864 }
9865 else
9866 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9867 break;
9868 }
9869
9870 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9871 {
9872 RTGCPTR GCPtrEffDst;
9873 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9874 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9875 if (fMemOperand)
9876 {
9877 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
9878 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9879 }
9880 else
9881 GCPtrEffDst = NIL_RTGCPTR;
9882
9883 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9884 {
9885 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9886 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9887 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9888 }
9889 else
9890 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9891 break;
9892 }
9893
9894 default:
9895 {
9896 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9897 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9898 }
9899 }
9900
9901 if (rcStrict == VINF_IEM_RAISED_XCPT)
9902 {
9903 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9904 rcStrict = VINF_SUCCESS;
9905 }
9906 return rcStrict;
9907}
9908
9909
9910/**
9911 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9912 * Conditional VM-exit.
9913 */
9914HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9915{
9916 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9917
9918 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9919 {
9920 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9921 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9922 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9923 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9924 }
9925 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9926}
9927
9928
9929/**
9930 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9931 * Conditional VM-exit.
9932 */
9933HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9934{
9935 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9936
9937 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9938
9939 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9940 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
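 /* The I/O size field encodes 1, 2 and 4 byte accesses as 0, 1 and 3 respectively; the
    value 2 is not a defined encoding, hence the assertion and the zero entry in the
    size table below. */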
9941 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9942
9943 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9944 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9945 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9946 {
9947 /*
9948 * IN/OUT instruction:
9949 * - Provides VM-exit instruction length.
9950 *
9951 * INS/OUTS instruction:
9952 * - Provides VM-exit instruction length.
9953 * - Provides Guest-linear address.
9954 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9955 */
9956 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9957 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9958
9959 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9960 pVmxTransient->ExitInstrInfo.u = 0;
9961 pVmxTransient->uGuestLinearAddr = 0;
9962
9963 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9964 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9965 if (fIOString)
9966 {
9967 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
9968 if (fVmxInsOutsInfo)
9969 {
9970 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9971 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
9972 }
9973 }
9974
9975 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
9976 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9977 }
9978 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9979}
9980
9981
9982/**
9983 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9984 */
9985HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9986{
9987 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9988
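 /* If the nested hypervisor uses MSR bitmaps, consult its bitmap for the MSR in ECX;
    without MSR bitmaps every RDMSR is reflected to it as a VM-exit. */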
9989 uint32_t fMsrpm;
9990 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9991 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9992 else
9993 fMsrpm = VMXMSRPM_EXIT_RD;
9994
9995 if (fMsrpm & VMXMSRPM_EXIT_RD)
9996 {
9997 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9998 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9999 }
10000 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10001}
10002
10003
10004/**
10005 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10006 */
10007HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10008{
10009 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10010
10011 uint32_t fMsrpm;
10012 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10013 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10014 else
10015 fMsrpm = VMXMSRPM_EXIT_WR;
10016
10017 if (fMsrpm & VMXMSRPM_EXIT_WR)
10018 {
10019 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10020 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10021 }
10022 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10023}
10024
10025
10026/**
10027 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10028 */
10029HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10030{
10031 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10032
10033 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10034 {
10035 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10036 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10037 }
10038 return vmxHCExitMwait(pVCpu, pVmxTransient);
10039}
10040
10041
10042/**
10043 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10044 * VM-exit.
10045 */
10046HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10047{
10048 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10049
10050 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10051 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10052 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10053 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10054}
10055
10056
10057/**
10058 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10059 */
10060HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10061{
10062 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10063
10064 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10065 {
10066 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10067 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10068 }
10069 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10070}
10071
10072
10073/**
10074 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10075 */
10076HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10077{
10078 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10079
10080 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10081 * PAUSE when executing a nested-guest? If it does not, we would not need
10082 * to check for the intercepts here. Just call VM-exit... */
10083
10084 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10085 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10086 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10087 {
10088 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10089 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10090 }
10091 return vmxHCExitPause(pVCpu, pVmxTransient);
10092}
10093
10094
10095/**
10096 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10097 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10098 */
10099HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10100{
10101 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10102
10103 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10104 {
10105 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10106 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10107 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10108 }
10109 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10110}
10111
10112
10113/**
10114 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10115 * VM-exit.
10116 */
10117HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10118{
10119 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10120
10121 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10122 | HMVMX_READ_EXIT_INSTR_LEN
10123 | HMVMX_READ_IDT_VECTORING_INFO
10124 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10125
10126 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10127
10128 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10129 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10130
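 /* Hand both the exit info and the IDT-vectoring info to IEM so the nested hypervisor
    can see any event that was being delivered when the APIC access occurred. */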
10131 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10132 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10133 pVmxTransient->uIdtVectoringErrorCode);
10134 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10135}
10136
10137
10138/**
10139 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10140 * Conditional VM-exit.
10141 */
10142HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10143{
10144 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10145
10146 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10147 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10148 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10149}
10150
10151
10152/**
10153 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10154 * Conditional VM-exit.
10155 */
10156HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10157{
10158 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10159
10160 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10161 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10162 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10163}
10164
10165
10166/**
10167 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10168 */
10169HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10170{
10171 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10172
10173 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10174 {
10175 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10176 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10177 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10178 }
10179 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10180}
10181
10182
10183/**
10184 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10185 */
10186HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10187{
10188 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10189
10190 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10191 {
10192 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10193 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10194 }
10195 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10196}
10197
10198
10199/**
10200 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10201 */
10202HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10203{
10204 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10205
10206 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10207 {
10208 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10209 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10210 | HMVMX_READ_EXIT_INSTR_INFO
10211 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10212 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10213 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10214 }
10215 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10216}
10217
10218
10219/**
10220 * Nested-guest VM-exit handler for invalid-guest state
10221 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10222 */
10223HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10224{
10225 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10226
10227 /*
10228 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10229 * So if it does happen, it likely indicates a bug in the hardware-assisted VMX code.
10230 * Handle it as if the outer guest itself were in an invalid guest state.
10231 *
10232 * When the fast path is implemented, this should be changed to cause the corresponding
10233 * nested-guest VM-exit.
10234 */
10235 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10236}
10237
10238
10239/**
10240 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10241 * and only provide the instruction length.
10242 *
10243 * Unconditional VM-exit.
10244 */
10245HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10246{
10247 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10248
10249#ifdef VBOX_STRICT
10250 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10251 switch (pVmxTransient->uExitReason)
10252 {
10253 case VMX_EXIT_ENCLS:
10254 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10255 break;
10256
10257 case VMX_EXIT_VMFUNC:
10258 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10259 break;
10260 }
10261#endif
10262
10263 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10264 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10265}
10266
10267
10268/**
10269 * Nested-guest VM-exit handler for instructions that provide instruction length as
10270 * well as more information.
10271 *
10272 * Unconditional VM-exit.
10273 */
10274HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10275{
10276 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10277
10278# ifdef VBOX_STRICT
10279 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10280 switch (pVmxTransient->uExitReason)
10281 {
10282 case VMX_EXIT_GDTR_IDTR_ACCESS:
10283 case VMX_EXIT_LDTR_TR_ACCESS:
10284 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10285 break;
10286
10287 case VMX_EXIT_RDRAND:
10288 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10289 break;
10290
10291 case VMX_EXIT_RDSEED:
10292 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10293 break;
10294
10295 case VMX_EXIT_XSAVES:
10296 case VMX_EXIT_XRSTORS:
10297 /** @todo NSTVMX: Verify XSS-bitmap. */
10298 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10299 break;
10300
10301 case VMX_EXIT_UMWAIT:
10302 case VMX_EXIT_TPAUSE:
10303 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10304 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10305 break;
10306
10307 case VMX_EXIT_LOADIWKEY:
10308 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10309 break;
10310 }
10311# endif
10312
10313 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10314 | HMVMX_READ_EXIT_INSTR_LEN
10315 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10316 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10317 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10318}
10319
10320# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10321
10322/**
10323 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10324 * Conditional VM-exit.
10325 */
10326HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10327{
10328 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10329 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10330
10331//# define DSL_IRQ_FIX_1
10332# define DSL_IRQ_FIX_2
10333
10334 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10335 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10336 {
10337 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10338 AssertRCReturn(rc, rc);
10339
10340# ifdef DSL_IRQ_FIX_2
10341 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10342 | HMVMX_READ_EXIT_INSTR_LEN
10343 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10344 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10345 | HMVMX_READ_IDT_VECTORING_INFO
10346 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10347 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10348
10349 /*
10350 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10351 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10352 * it is the inner VMM's job to deal with the event and we'll clear the recovered event.
10353 */
10354 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10355 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10356 { /*likely*/ }
10357 else
10358 {
10359 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10360 return rcStrict;
10361 }
10362 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* Paranoia; nothing should inject events below this point. */
10363# else
10364 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10365 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10366 VBOXSTRICTRC rcStrict;
10367# endif
10368
10369 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10370 uint64_t const uExitQual = pVmxTransient->uExitQual;
10371
10372 RTGCPTR GCPtrNestedFault;
10373 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10374 if (fIsLinearAddrValid)
10375 {
10376 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10377 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10378 }
10379 else
10380 GCPtrNestedFault = 0;
10381
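 /* Fold the EPT-violation exit qualification into an x86 page-fault style error code for
    the nested page-table walker: instruction fetches map to ID, writes to RW, and hitting
    an entry with any read/write/execute permission counts as present (P). */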
10382 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10383 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10384 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10385 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10386 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10387
10388 PGMPTWALK Walk;
10389 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10390 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10391 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10392 &Walk);
10393 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10394 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10395 if (RT_SUCCESS(rcStrict))
10396 {
10397# ifdef DSL_IRQ_FIX_1
10398 /*
10399 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10400 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10401 * it's the inner VMM's problem to deal with it. This makes it troublesome to
10402 * call vmxHCCheckExitDueToEventDelivery before PGMR0NestedTrap0eHandlerNestedPaging
10403 * has decided whose VM-exit it is. Unfortunately, we're in a bit of a pickle then if
10404 * we end up with an informational status here, as we _must_ _not_ drop events either.
10405 */
10406 /** @todo Need a better solution for this; it should probably be applied to
10407 * other exits too... */
10408 if (rcStrict == VINF_SUCCESS)
10409 {
10410 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
10411 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10412 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10413 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10414 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10415
10416 vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10417 }
10418# endif
10419 return rcStrict;
10420 }
10421
10422# ifndef DSL_IRQ_FIX_2
10423 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10424 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10425 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10426# else
10427 if (fClearEventOnForward)
10428 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10429# endif
10430
10431 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10432 pVmxTransient->uIdtVectoringErrorCode);
10433 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10434 {
10435 VMXVEXITINFO const ExitInfo
10436 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10437 pVmxTransient->uExitQual,
10438 pVmxTransient->cbExitInstr,
10439 pVmxTransient->uGuestLinearAddr,
10440 pVmxTransient->uGuestPhysicalAddr);
10441 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10442 }
10443
10444 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10445 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10446 }
10447
10448 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10449}
10450
10451
10452/**
10453 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10454 * Conditional VM-exit.
10455 */
10456HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10457{
10458 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10459 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10460
10461 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10462 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10463 {
10464 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_ALL);
10465 AssertRCReturn(rc, rc);
10466
10467 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10468
10469 PGMPTWALK Walk;
10470 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10471 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10472 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10473 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10474 0 /* GCPtrNestedFault */, &Walk);
10475 if (RT_SUCCESS(rcStrict))
10476 {
10477 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10478 return rcStrict;
10479 }
10480
10481 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10482 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10483 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10484
10485 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10486 pVmxTransient->uIdtVectoringErrorCode);
10487 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10488 }
10489
10490 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10491}
10492
10493# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10494
10495/** @} */
10496#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10497
10498
10499/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10500 * probes.
10501 *
10502 * The following few functions and associated structure contain the bloat
10503 * necessary for providing detailed debug events and dtrace probes as well as
10504 * reliable host side single stepping. This works on the principle of
10505 * "subclassing" the normal execution loop and workers. We replace the loop
10506 * method completely and override selected helpers to add necessary adjustments
10507 * to their core operation.
10508 *
10509 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10510 * any performance for debug and analysis features.
10511 *
10512 * @{
10513 */
10514
10515/**
10516 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10517 * in the debug run loop.
10518 */
10519typedef struct VMXRUNDBGSTATE
10520{
10521 /** The RIP we started executing at. This is for detecting that we stepped. */
10522 uint64_t uRipStart;
10523 /** The CS we started executing with. */
10524 uint16_t uCsStart;
10525
10526 /** Whether we've actually modified the 1st execution control field. */
10527 bool fModifiedProcCtls : 1;
10528 /** Whether we've actually modified the 2nd execution control field. */
10529 bool fModifiedProcCtls2 : 1;
10530 /** Whether we've actually modified the exception bitmap. */
10531 bool fModifiedXcptBitmap : 1;
10532
10533 /** Whether we want the CR0 guest/host mask to be cleared. */
10534 bool fClearCr0Mask : 1;
10535 /** Whether we want the CR4 guest/host mask to be cleared. */
10536 bool fClearCr4Mask : 1;
10537 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10538 uint32_t fCpe1Extra;
10539 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10540 uint32_t fCpe1Unwanted;
10541 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10542 uint32_t fCpe2Extra;
10543 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10544 uint32_t bmXcptExtra;
10545 /** The sequence number of the Dtrace provider settings the state was
10546 * configured against. */
10547 uint32_t uDtraceSettingsSeqNo;
10548 /** VM-exits to check (one bit per VM-exit). */
10549 uint32_t bmExitsToCheck[3];
10550
10551 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10552 uint32_t fProcCtlsInitial;
10553 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10554 uint32_t fProcCtls2Initial;
10555 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10556 uint32_t bmXcptInitial;
10557} VMXRUNDBGSTATE;
10558AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10559typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
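/*
 * Rough life cycle of the debug state (see the functions below): vmxHCRunDebugStateInit()
 * records the baseline, vmxHCPreRunGuestDebugStateUpdate() works out which extra VM-exits
 * DBGF and DTrace currently want, vmxHCPreRunGuestDebugStateApply() commits the result to
 * the VMCS right before entering the guest, and vmxHCRunDebugStateRevert() restores the
 * original values once the debug run loop is done.
 */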
10560
10561
10562/**
10563 * Initializes the VMXRUNDBGSTATE structure.
10564 *
10565 * @param pVCpu The cross context virtual CPU structure of the
10566 * calling EMT.
10567 * @param pVmxTransient The VMX-transient structure.
10568 * @param pDbgState The debug state to initialize.
10569 */
10570static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10571{
10572 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10573 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10574
10575 pDbgState->fModifiedProcCtls = false;
10576 pDbgState->fModifiedProcCtls2 = false;
10577 pDbgState->fModifiedXcptBitmap = false;
10578 pDbgState->fClearCr0Mask = false;
10579 pDbgState->fClearCr4Mask = false;
10580 pDbgState->fCpe1Extra = 0;
10581 pDbgState->fCpe1Unwanted = 0;
10582 pDbgState->fCpe2Extra = 0;
10583 pDbgState->bmXcptExtra = 0;
10584 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10585 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10586 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10587}
10588
10589
10590/**
10591 * Updates the VMCS fields with changes requested by @a pDbgState.
10592 *
10593 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
10594 * immediately before executing guest code, i.e. when interrupts are disabled.
10595 * We don't check status codes here as we cannot easily assert or return in the
10596 * latter case.
10597 *
10598 * @param pVCpu The cross context virtual CPU structure.
10599 * @param pVmxTransient The VMX-transient structure.
10600 * @param pDbgState The debug state.
10601 */
10602static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10603{
10604 /*
10605 * Ensure desired flags in VMCS control fields are set.
10606 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10607 *
10608 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10609 * there should be no stale data in pCtx at this point.
10610 */
10611 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10612 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10613 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10614 {
10615 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10616 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10617 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10618 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10619 pDbgState->fModifiedProcCtls = true;
10620 }
10621
10622 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10623 {
10624 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10625 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10626 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10627 pDbgState->fModifiedProcCtls2 = true;
10628 }
10629
10630 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10631 {
10632 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10633 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10634 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10635 pDbgState->fModifiedXcptBitmap = true;
10636 }
10637
10638 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10639 {
10640 pVmcsInfo->u64Cr0Mask = 0;
10641 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10642 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10643 }
10644
10645 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10646 {
10647 pVmcsInfo->u64Cr4Mask = 0;
10648 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10649 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10650 }
10651
10652 NOREF(pVCpu);
10653}
10654
10655
10656/**
10657 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
10658 * re-entry next time around.
10659 *
10660 * @returns Strict VBox status code (i.e. informational status codes too).
10661 * @param pVCpu The cross context virtual CPU structure.
10662 * @param pVmxTransient The VMX-transient structure.
10663 * @param pDbgState The debug state.
10664 * @param rcStrict The return code from executing the guest using single
10665 * stepping.
10666 */
10667static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10668 VBOXSTRICTRC rcStrict)
10669{
10670 /*
10671 * Restore VM-exit control settings as we may not reenter this function the
10672 * next time around.
10673 */
10674 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10675
10676 /* We reload the initial value and trigger what recalculations we can the
10677 next time around. From the looks of things, that's all that's required atm. */
10678 if (pDbgState->fModifiedProcCtls)
10679 {
10680 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10681 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10682 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10683 AssertRC(rc2);
10684 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10685 }
10686
10687 /* We're currently the only ones messing with this one, so just restore the
10688 cached value and reload the field. */
10689 if ( pDbgState->fModifiedProcCtls2
10690 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10691 {
10692 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10693 AssertRC(rc2);
10694 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10695 }
10696
10697 /* If we've modified the exception bitmap, we restore it and trigger
10698 reloading and partial recalculation the next time around. */
10699 if (pDbgState->fModifiedXcptBitmap)
10700 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10701
10702 return rcStrict;
10703}
10704
10705
10706/**
10707 * Configures VM-exit controls for current DBGF and DTrace settings.
10708 *
10709 * This updates @a pDbgState and the VMCS execution control fields to reflect
10710 * the necessary VM-exits demanded by DBGF and DTrace.
10711 *
10712 * @param pVCpu The cross context virtual CPU structure.
10713 * @param pVmxTransient The VMX-transient structure. May update
10714 * fUpdatedTscOffsettingAndPreemptTimer.
10715 * @param pDbgState The debug state.
10716 */
10717static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10718{
10719#ifndef IN_NEM_DARWIN
10720 /*
10721 * Take down the dtrace serial number so we can spot changes.
10722 */
10723 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10724 ASMCompilerBarrier();
10725#endif
10726
10727 /*
10728 * We'll rebuild most of the middle block of data members (holding the
10729 * current settings) as we go along here, so start by clearing it all.
10730 */
10731 pDbgState->bmXcptExtra = 0;
10732 pDbgState->fCpe1Extra = 0;
10733 pDbgState->fCpe1Unwanted = 0;
10734 pDbgState->fCpe2Extra = 0;
10735 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10736 pDbgState->bmExitsToCheck[i] = 0;
10737
10738 /*
10739 * Software interrupts (INT XXh) - no idea how to trigger these...
10740 */
10741 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10742 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10743 || VBOXVMM_INT_SOFTWARE_ENABLED())
10744 {
10745 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10746 }
10747
10748 /*
10749 * INT3 breakpoints - triggered by #BP exceptions.
10750 */
10751 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10752 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10753
10754 /*
10755 * Exception bitmap and XCPT events+probes.
10756 */
10757 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10758 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10759 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10760
10761 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10762 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10763 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10764 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10765 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10766 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10767 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10768 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10769 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10770 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10771 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10772 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10773 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10774 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10775 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10776 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10777 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10778 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10779
10780 if (pDbgState->bmXcptExtra)
10781 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10782
10783 /*
10784 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10785 *
10786 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
10787 * So, when adding/changing/removing please don't forget to update it.
10788 *
10789 * Some of the macros are picking up local variables to save horizontal space
10790 * (being able to see it all in a table is the lesser evil here).
10791 */
10792#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10793 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10794 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10795#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10796 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10797 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10798 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10799 } else do { } while (0)
10800#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10801 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10802 { \
10803 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10804 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10805 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10806 } else do { } while (0)
10807#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10808 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10809 { \
10810 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10811 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10812 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10813 } else do { } while (0)
10814#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10815 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10816 { \
10817 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10818 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10819 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10820 } else do { } while (0)
10821
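 /* For example, SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
    adds VMX_PROC_CTLS_HLT_EXIT to fCpe1Extra and marks VMX_EXIT_HLT in bmExitsToCheck
    whenever the DBGFEVENT_INSTR_HALT event or the VBOXVMM_INSTR_HALT probe is enabled. */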
10822 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10823 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10824 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10825 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10826 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10827
10828 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10829 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10830 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10831 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10832 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10833 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10834 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10835 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10836 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10837 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10838 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10839 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10840 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10841 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10842 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10843 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10844 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10845 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10846 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10847 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10848 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10849 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10850 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10851 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10852 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10853 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10854 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10855 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10856 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10857 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10858 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10859 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10860 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10861 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10862 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10863 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10864
10865 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10866 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10867 {
10868 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10869 | CPUMCTX_EXTRN_APIC_TPR);
10870 AssertRC(rc);
10871
10872#if 0 /** @todo fix me */
10873 pDbgState->fClearCr0Mask = true;
10874 pDbgState->fClearCr4Mask = true;
10875#endif
10876 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10877 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10878 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10879 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10880 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10881 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10882 require clearing here and in the loop if we start using it. */
10883 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10884 }
10885 else
10886 {
10887 if (pDbgState->fClearCr0Mask)
10888 {
10889 pDbgState->fClearCr0Mask = false;
10890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10891 }
10892 if (pDbgState->fClearCr4Mask)
10893 {
10894 pDbgState->fClearCr4Mask = false;
10895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10896 }
10897 }
10898 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10899 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10900
10901 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10902 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10903 {
10904 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10905 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10906 }
10907 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10908 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10909
10910 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10911 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10912 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10913 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10914 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10915 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10916 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10917 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10918#if 0 /** @todo too slow, fix handler. */
10919 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10920#endif
10921 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10922
10923 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10924 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10925 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10926 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10927 {
10928 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10929 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10930 }
10931 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10932 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10933 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10934 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10935
10936 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10937 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10938 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10939 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10940 {
10941 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10942 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10943 }
10944 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10945 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10946 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10947 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10948
10949 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10950 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10951 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10952 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10953 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10954 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10955 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10956 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10957 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10958 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10959 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10960 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10961 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10962 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10963 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10964 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10965 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10966 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10967 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10968 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10969 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10970 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10971
10972#undef IS_EITHER_ENABLED
10973#undef SET_ONLY_XBM_IF_EITHER_EN
10974#undef SET_CPE1_XBM_IF_EITHER_EN
10975#undef SET_CPEU_XBM_IF_EITHER_EN
10976#undef SET_CPE2_XBM_IF_EITHER_EN
10977
10978 /*
10979 * Sanitize the control stuff.
10980 */
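 /* Bits outside allowed1 cannot be set, while bits set in allowed0 are fixed to one and
    thus cannot be masked off as unwanted. */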
10981 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10982 if (pDbgState->fCpe2Extra)
10983 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10984 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10985 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10986#ifndef IN_NEM_DARWIN
10987 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10988 {
10989 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10990 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10991 }
10992#else
10993 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10994 {
10995 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10996 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10997 }
10998#endif
10999
11000 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11001 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11002 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11003 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11004}
11005
11006
11007/**
11008 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11009 * appropriate.
11010 *
11011 * The caller has checked the VM-exit against the
11012 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11013 * already, so we don't have to do that either.
11014 *
11015 * @returns Strict VBox status code (i.e. informational status codes too).
11016 * @param pVCpu The cross context virtual CPU structure.
11017 * @param pVmxTransient The VMX-transient structure.
11018 * @param uExitReason The VM-exit reason.
11019 *
11020 * @remarks The name of this function is displayed by dtrace, so keep it short
11021 * and to the point. No longer than 33 chars long, please.
11022 */
11023static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11024{
11025 /*
11026 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11027 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11028 *
11029 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11030 * does. Must add/change/remove both places. Same ordering, please.
11031 *
11032 * Added/removed events must also be reflected in the next section
11033 * where we dispatch dtrace events.
11034 */
11035 bool fDtrace1 = false;
11036 bool fDtrace2 = false;
11037 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11038 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11039 uint32_t uEventArg = 0;
11040#define SET_EXIT(a_EventSubName) \
11041 do { \
11042 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11043 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11044 } while (0)
11045#define SET_BOTH(a_EventSubName) \
11046 do { \
11047 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11048 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11049 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11050 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11051 } while (0)
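 /* E.g. SET_BOTH(CPUID) selects DBGFEVENT_INSTR_CPUID + DBGFEVENT_EXIT_CPUID and samples
    the corresponding VBOXVMM_INSTR_CPUID / VBOXVMM_EXIT_CPUID probe enable flags. */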
11052 switch (uExitReason)
11053 {
11054 case VMX_EXIT_MTF:
11055 return vmxHCExitMtf(pVCpu, pVmxTransient);
11056
11057 case VMX_EXIT_XCPT_OR_NMI:
11058 {
11059 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11060 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11061 {
11062 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11063 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11064 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11065 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11066 {
11067 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11068 {
11069 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11070 uEventArg = pVmxTransient->uExitIntErrorCode;
11071 }
11072 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11073 switch (enmEvent1)
11074 {
11075 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11076 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11077 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11078 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11079 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11080 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11081 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11082 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11083 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11084 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11085 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11086 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11087 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11088 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11089 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11090 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11091 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11092 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11093 default: break;
11094 }
11095 }
11096 else
11097 AssertFailed();
11098 break;
11099
11100 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11101 uEventArg = idxVector;
11102 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11103 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11104 break;
11105 }
11106 break;
11107 }
11108
11109 case VMX_EXIT_TRIPLE_FAULT:
11110 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11111 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11112 break;
11113 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11114 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11115 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11116 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11117 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11118
11119 /* Instruction specific VM-exits: */
11120 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11121 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11122 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11123 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11124 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11125 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11126 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11127 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11128 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11129 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11130 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11131 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11132 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11133 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11134 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11135 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11136 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11137 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11138 case VMX_EXIT_MOV_CRX:
11139 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11140 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11141 SET_BOTH(CRX_READ);
11142 else
11143 SET_BOTH(CRX_WRITE);
11144 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11145 break;
11146 case VMX_EXIT_MOV_DRX:
11147 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11148 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11149 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11150 SET_BOTH(DRX_READ);
11151 else
11152 SET_BOTH(DRX_WRITE);
11153 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11154 break;
11155 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11156 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11157 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11158 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11159 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11160 case VMX_EXIT_GDTR_IDTR_ACCESS:
11161 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11162 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11163 {
11164 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11165 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11166 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11167 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11168 }
11169 break;
11170
11171 case VMX_EXIT_LDTR_TR_ACCESS:
11172 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11173 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11174 {
11175 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11176 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11177 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11178 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11179 }
11180 break;
11181
11182 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11183 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11184 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11185 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11186 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11187 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11188 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11189 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11190 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11191 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11192 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11193
11194 /* Events that aren't relevant at this point. */
11195 case VMX_EXIT_EXT_INT:
11196 case VMX_EXIT_INT_WINDOW:
11197 case VMX_EXIT_NMI_WINDOW:
11198 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11199 case VMX_EXIT_PREEMPT_TIMER:
11200 case VMX_EXIT_IO_INSTR:
11201 break;
11202
11203 /* Errors and unexpected events. */
11204 case VMX_EXIT_INIT_SIGNAL:
11205 case VMX_EXIT_SIPI:
11206 case VMX_EXIT_IO_SMI:
11207 case VMX_EXIT_SMI:
11208 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11209 case VMX_EXIT_ERR_MSR_LOAD:
11210 case VMX_EXIT_ERR_MACHINE_CHECK:
11211 case VMX_EXIT_PML_FULL:
11212 case VMX_EXIT_VIRTUALIZED_EOI:
11213 break;
11214
11215 default:
11216 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11217 break;
11218 }
11219#undef SET_BOTH
11220#undef SET_EXIT
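/* Note: SET_BOTH() and SET_EXIT(), #undef'd here, are local helper macros defined
   just ahead of the switch above.  Judging from how enmEvent1/enmEvent2 and
   fDtrace1/fDtrace2 are consumed below, SET_BOTH(a_Name) presumably does something
   along these lines (a sketch, not the verbatim definition):
       enmEvent1 = DBGFEVENT_INSTR_<a_Name>;  fDtrace1 = <instruction probe enabled?>;
       enmEvent2 = DBGFEVENT_EXIT_<a_Name>;   fDtrace2 = <exit probe enabled?>;
   i.e. it selects both the instruction-level and the VM-exit-level event/probe pair. */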
11221
11222 /*
11223     * Dtrace tracepoints go first. We do them all here at once so we don't
11224     * have to repeat the guest-state saving and related setup a few dozen times.
11225     * The downside is that we have to repeat the switch, though this time
11226 * we use enmEvent since the probes are a subset of what DBGF does.
11227 */
11228 if (fDtrace1 || fDtrace2)
11229 {
11230 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11231 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11232 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
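        /* First the exception, software-interrupt and instruction probes (enmEvent1). */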
11233 switch (enmEvent1)
11234 {
11235 /** @todo consider which extra parameters would be helpful for each probe. */
11236 case DBGFEVENT_END: break;
11237 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11238 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11239 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11240 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11241 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11242 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11243 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11244 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11245 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11246 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11247 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11248 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11249 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11250 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11251 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11252 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11253 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11254 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11255 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11256 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11257 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11258 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11259 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11260 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11261 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11262 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11263 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11264 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11265 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11266 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11267 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11268 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11269 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11270 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11271 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11272 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11273 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11274 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11275 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11276 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11277 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11278 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11279 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11280 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11281 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11282 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11283 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11284 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11285 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11286 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11287 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11288 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11289 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11290 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11291 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11292 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11293 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11294 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11295 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11296 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11297 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11298 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11299 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11300 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11301 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11302 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11303 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11304 }
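        /* Then the corresponding VM-exit probes (enmEvent2). */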
11305 switch (enmEvent2)
11306 {
11307 /** @todo consider which extra parameters would be helpful for each probe. */
11308 case DBGFEVENT_END: break;
11309 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11310 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11311 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11312 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11313 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11314 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11315 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11316 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11317 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11318 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11319 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11320 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11321 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11322 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11323 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11324 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11325 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11326 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11327 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11328 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11329 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11330 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11331 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11332 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11333 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11334 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11335 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11336 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11337 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11338 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11339 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11340 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11341 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11342 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11343 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11344 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11345 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11346 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11347 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11348 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11349 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11350 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11351 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11352 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11353 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11354 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11355 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11356 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11357 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11358 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11359 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11360 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11361 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11362 }
11363 }
11364
11365 /*
11366     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11367 * the DBGF call will do a full check).
11368 *
11369 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11370     * Note! If we have two events, we prioritize the first, i.e. the instruction
11371 * one, in order to avoid event nesting.
11372 */
11373 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11374 if ( enmEvent1 != DBGFEVENT_END
11375 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11376 {
11377 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11378 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11379 if (rcStrict != VINF_SUCCESS)
11380 return rcStrict;
11381 }
11382 else if ( enmEvent2 != DBGFEVENT_END
11383 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11384 {
11385 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11386 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11387 if (rcStrict != VINF_SUCCESS)
11388 return rcStrict;
11389 }
11390
11391 return VINF_SUCCESS;
11392}
11393
11394
11395/**
11396 * Single-stepping VM-exit filtering.
11397 *
11398 * This preprocesses the VM-exits and decides whether we've gotten far
11399 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11400 * handling is performed.
11401 *
11402 * @returns Strict VBox status code (i.e. informational status codes too).
11403 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11404 * @param pVmxTransient The VMX-transient structure.
11405 * @param pDbgState The debug state.
11406 */
11407DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11408{
11409 /*
11410     * Expensive (imports the guest state) generic dtrace VM-exit probe.
11411 */
11412 uint32_t const uExitReason = pVmxTransient->uExitReason;
11413 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11414 { /* more likely */ }
11415 else
11416 {
11417 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11418 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11419 AssertRC(rc);
11420 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11421 }
11422
11423#ifndef IN_NEM_DARWIN
11424 /*
11425 * Check for host NMI, just to get that out of the way.
11426 */
11427 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11428 { /* normally likely */ }
11429 else
11430 {
11431 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11432 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11433 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11434 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11435 }
11436#endif
11437
11438 /*
11439 * Check for single stepping event if we're stepping.
11440 */
11441 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11442 {
11443 switch (uExitReason)
11444 {
11445 case VMX_EXIT_MTF:
11446 return vmxHCExitMtf(pVCpu, pVmxTransient);
11447
11448 /* Various events: */
11449 case VMX_EXIT_XCPT_OR_NMI:
11450 case VMX_EXIT_EXT_INT:
11451 case VMX_EXIT_TRIPLE_FAULT:
11452 case VMX_EXIT_INT_WINDOW:
11453 case VMX_EXIT_NMI_WINDOW:
11454 case VMX_EXIT_TASK_SWITCH:
11455 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11456 case VMX_EXIT_APIC_ACCESS:
11457 case VMX_EXIT_EPT_VIOLATION:
11458 case VMX_EXIT_EPT_MISCONFIG:
11459 case VMX_EXIT_PREEMPT_TIMER:
11460
11461 /* Instruction specific VM-exits: */
11462 case VMX_EXIT_CPUID:
11463 case VMX_EXIT_GETSEC:
11464 case VMX_EXIT_HLT:
11465 case VMX_EXIT_INVD:
11466 case VMX_EXIT_INVLPG:
11467 case VMX_EXIT_RDPMC:
11468 case VMX_EXIT_RDTSC:
11469 case VMX_EXIT_RSM:
11470 case VMX_EXIT_VMCALL:
11471 case VMX_EXIT_VMCLEAR:
11472 case VMX_EXIT_VMLAUNCH:
11473 case VMX_EXIT_VMPTRLD:
11474 case VMX_EXIT_VMPTRST:
11475 case VMX_EXIT_VMREAD:
11476 case VMX_EXIT_VMRESUME:
11477 case VMX_EXIT_VMWRITE:
11478 case VMX_EXIT_VMXOFF:
11479 case VMX_EXIT_VMXON:
11480 case VMX_EXIT_MOV_CRX:
11481 case VMX_EXIT_MOV_DRX:
11482 case VMX_EXIT_IO_INSTR:
11483 case VMX_EXIT_RDMSR:
11484 case VMX_EXIT_WRMSR:
11485 case VMX_EXIT_MWAIT:
11486 case VMX_EXIT_MONITOR:
11487 case VMX_EXIT_PAUSE:
11488 case VMX_EXIT_GDTR_IDTR_ACCESS:
11489 case VMX_EXIT_LDTR_TR_ACCESS:
11490 case VMX_EXIT_INVEPT:
11491 case VMX_EXIT_RDTSCP:
11492 case VMX_EXIT_INVVPID:
11493 case VMX_EXIT_WBINVD:
11494 case VMX_EXIT_XSETBV:
11495 case VMX_EXIT_RDRAND:
11496 case VMX_EXIT_INVPCID:
11497 case VMX_EXIT_VMFUNC:
11498 case VMX_EXIT_RDSEED:
11499 case VMX_EXIT_XSAVES:
11500 case VMX_EXIT_XRSTORS:
11501 {
11502 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11503 AssertRCReturn(rc, rc);
11504 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11505 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11506 return VINF_EM_DBG_STEPPED;
11507 break;
11508 }
11509
11510 /* Errors and unexpected events: */
11511 case VMX_EXIT_INIT_SIGNAL:
11512 case VMX_EXIT_SIPI:
11513 case VMX_EXIT_IO_SMI:
11514 case VMX_EXIT_SMI:
11515 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11516 case VMX_EXIT_ERR_MSR_LOAD:
11517 case VMX_EXIT_ERR_MACHINE_CHECK:
11518 case VMX_EXIT_PML_FULL:
11519 case VMX_EXIT_VIRTUALIZED_EOI:
11520            case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11521 break;
11522
11523 default:
11524 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11525 break;
11526 }
11527 }
11528
11529 /*
11530 * Check for debugger event breakpoints and dtrace probes.
11531 */
11532 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11533 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11534 {
11535 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11536 if (rcStrict != VINF_SUCCESS)
11537 return rcStrict;
11538 }
11539
11540 /*
11541 * Normal processing.
11542 */
11543#ifdef HMVMX_USE_FUNCTION_TABLE
11544 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11545#else
11546 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11547#endif
11548}
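
/*
 * A minimal, hypothetical sketch of how a debug run loop is expected to drive
 * vmxHCRunDebugHandleExit() after each VM-exit; the local names below are
 * illustrative only and not taken from the actual loop:
 *
 *     for (;;)
 *     {
 *         ... prepare for and execute the guest (VMLAUNCH/VMRESUME) ...
 *         VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
 *         if (rcStrict != VINF_SUCCESS)    (covers errors as well as VINF_EM_DBG_STEPPED)
 *             break;
 *     }
 */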
11549
11550/** @} */