VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h @ 97065

Last change on this file since 97065 was 97065, checked in by vboxsync, 3 years ago

VMM/HMVMXR0: Converted the VMX_VMCS*_GUEST_SEG_XXXX macro test assertions from table+runtime assertions to compile time ones, now that vmxHCImportGuestSegReg is a template.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 495.8 KB
1/* $Id: VMXAllTemplate.cpp.h 97065 2022-10-09 23:01:38Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
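/* Usage sketch (illustrative, based only on macros and field names visible in this
 * file): a VM-exit handler that has requested the exit qualification via
 * vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>() can document that
 * expectation in strict builds with:
 *
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *
 * In non-strict builds the macro compiles away to an empty statement. */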
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
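/* Usage sketch (illustrative): a code path about to read CR0 and RFLAGS from the
 * guest-CPU context can assert that neither is still marked as external (i.e. both
 * have been imported from the VMCS):
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 *
 * vmxHCSwitchToGstOrNstGstVmcs() below uses it with HMVMX_CPUMCTX_EXTRN_ALL to
 * require that the whole tracked subset has been synced. */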
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
470 /* 32-bit read-only fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
569
570#ifdef HMVMX_USE_FUNCTION_TABLE
571/**
572 * VMX_EXIT dispatch table.
573 */
574static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
575{
576 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
577 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
578 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
579 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
580 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
581 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
582 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
583 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
584 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
585 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
586 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
587 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
588 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
589 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
590 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
591 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
592 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
593 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
594 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
596 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
597 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
598 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
599 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
600 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
601 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
602 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
603 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
604 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
605#else
606 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
607 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
608 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
609 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
610 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
611 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
612 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
613 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
614 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
615#endif
616 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
617 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
618 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
619 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
620 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
621 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
622 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
623 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
624 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
625 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
626 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
627 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
628 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
629 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
630 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
632 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
633 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
634 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
635 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
636 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
637 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
639 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
640#else
641 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
642#endif
643 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
644 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
646 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
647#else
648 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
651 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
652 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
653 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
654 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
655 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
656 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
657 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
658 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
659 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
660 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
661 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
662 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
663 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
664 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
665 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
666};
667#endif /* HMVMX_USE_FUNCTION_TABLE */
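/* Dispatch sketch (assumption; the actual dispatcher lives further down in this
 * file and is not part of this excerpt): with HMVMX_USE_FUNCTION_TABLE defined, an
 * in-range VM-exit reason is routed roughly like this:
 *
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 */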
668
669#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
670static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
671{
672 /* 0 */ "(Not Used)",
673 /* 1 */ "VMCALL executed in VMX root operation.",
674 /* 2 */ "VMCLEAR with invalid physical address.",
675 /* 3 */ "VMCLEAR with VMXON pointer.",
676 /* 4 */ "VMLAUNCH with non-clear VMCS.",
677 /* 5 */ "VMRESUME with non-launched VMCS.",
678 /* 6 */ "VMRESUME after VMXOFF",
679 /* 7 */ "VM-entry with invalid control fields.",
680 /* 8 */ "VM-entry with invalid host state fields.",
681 /* 9 */ "VMPTRLD with invalid physical address.",
682 /* 10 */ "VMPTRLD with VMXON pointer.",
683 /* 11 */ "VMPTRLD with incorrect revision identifier.",
684 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
685 /* 13 */ "VMWRITE to read-only VMCS component.",
686 /* 14 */ "(Not Used)",
687 /* 15 */ "VMXON executed in VMX root operation.",
688 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
689 /* 17 */ "VM-entry with non-launched executing VMCS.",
690 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
691 /* 19 */ "VMCALL with non-clear VMCS.",
692 /* 20 */ "VMCALL with invalid VM-exit control fields.",
693 /* 21 */ "(Not Used)",
694 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
695 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
696 /* 24 */ "VMCALL with invalid SMM-monitor features.",
697 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
698 /* 26 */ "VM-entry with events blocked by MOV SS.",
699 /* 27 */ "(Not Used)",
700 /* 28 */ "Invalid operand to INVEPT/INVVPID."
701};
702#endif /* VBOX_STRICT && LOG_ENABLED */
703
704
705/**
706 * Gets the CR0 guest/host mask.
707 *
708 * These bits typically do not change through the lifetime of a VM. Any bit set in
709 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
710 * by the guest.
711 *
712 * @returns The CR0 guest/host mask.
713 * @param pVCpu The cross context virtual CPU structure.
714 */
715static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
716{
717 /*
718 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW),
719 * and to CR0 bits that we require for shadow paging (PG), must cause VM-exits.
720 *
721 * Furthermore, modifications to any bits that are reserved/unspecified currently
722 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
723 * when future CPUs specify and use currently reserved/unspecified bits.
724 */
725 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
726 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
727 * and @bugref{6944}. */
728 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
729 return ( X86_CR0_PE
730 | X86_CR0_NE
731 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
732 | X86_CR0_PG
733 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
734}
735
736
737/**
738 * Gets the CR4 guest/host mask.
739 *
740 * These bits typically do not change through the lifetime of a VM. Any bit set in
741 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
742 * by the guest.
743 *
744 * @returns The CR4 guest/host mask.
745 * @param pVCpu The cross context virtual CPU structure.
746 */
747static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
748{
749 /*
750 * We construct a mask of all CR4 bits that the guest can modify without causing
751 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
752 * a VM-exit when the guest attempts to modify them when executing using
753 * hardware-assisted VMX.
754 *
755 * When a feature is not exposed to the guest (and may be present on the host),
756 * we want to intercept guest modifications to the bit so we can emulate proper
757 * behavior (e.g., #GP).
758 *
759 * Furthermore, only modifications to those bits that don't require immediate
760 * emulation are allowed. For example, PCIDE is excluded because the behavior
761 * depends on CR3 which might not always be the guest value while executing
762 * using hardware-assisted VMX.
763 */
764 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
765 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
766#ifdef IN_NEM_DARWIN
767 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
768#endif
769 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
770
771 /*
772 * Paranoia.
773 * Ensure features exposed to the guest are present on the host.
774 */
775 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
776#ifdef IN_NEM_DARWIN
777 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
778#endif
779 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
780
781 uint64_t const fGstMask = X86_CR4_PVI
782 | X86_CR4_TSD
783 | X86_CR4_DE
784 | X86_CR4_MCE
785 | X86_CR4_PCE
786 | X86_CR4_OSXMMEEXCPT
787 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
788#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
789 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
790 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
791#endif
792 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
793 return ~fGstMask;
794}
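/* Usage sketch (assumption; the caller is not part of this excerpt and
 * VMX_VMCS_WRITE_NW is presumed to be provided by the including code): the
 * returned mask would typically be committed to the natural-width CR4 guest/host
 * mask field, e.g.:
 *
 *     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
 *     AssertRC(rc);
 */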
795
796
797/**
798 * Adds one or more exceptions to the exception bitmap and commits it to the current
799 * VMCS.
800 *
801 * @param pVCpu The cross context virtual CPU structure.
802 * @param pVmxTransient The VMX-transient structure.
803 * @param uXcptMask The exception(s) to add.
804 */
805static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
806{
807 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
808 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
809 if ((uXcptBitmap & uXcptMask) != uXcptMask)
810 {
811 uXcptBitmap |= uXcptMask;
812 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
813 AssertRC(rc);
814 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
815 }
816}
817
818
819/**
820 * Adds an exception to the exception bitmap and commits it to the current VMCS.
821 *
822 * @param pVCpu The cross context virtual CPU structure.
823 * @param pVmxTransient The VMX-transient structure.
824 * @param uXcpt The exception to add.
825 */
826static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
827{
828 Assert(uXcpt <= X86_XCPT_LAST);
829 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
830}
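/* Usage sketch (illustrative): to start trapping guest #GP faults a caller only
 * needs:
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *
 * which ORs RT_BIT_32(X86_XCPT_GP) into the cached exception bitmap and writes the
 * bitmap back to the VMCS only if the bit was not already set. */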
831
832
833/**
834 * Remove one or more exceptions from the exception bitmap and commits it to the
835 * current VMCS.
836 *
837 * This takes care of not removing the exception intercept if a nested-guest
838 * requires the exception to be intercepted.
839 *
840 * @returns VBox status code.
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to remove.
844 */
845static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if (u32XcptBitmap & uXcptMask)
850 {
851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
852 if (!pVmxTransient->fIsNestedGuest)
853 { /* likely */ }
854 else
855 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
856#endif
857#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
858 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
859 | RT_BIT(X86_XCPT_DE)
860 | RT_BIT(X86_XCPT_NM)
861 | RT_BIT(X86_XCPT_TS)
862 | RT_BIT(X86_XCPT_UD)
863 | RT_BIT(X86_XCPT_NP)
864 | RT_BIT(X86_XCPT_SS)
865 | RT_BIT(X86_XCPT_GP)
866 | RT_BIT(X86_XCPT_PF)
867 | RT_BIT(X86_XCPT_MF));
868#elif defined(HMVMX_ALWAYS_TRAP_PF)
869 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
870#endif
871 if (uXcptMask)
872 {
873 /* Validate we are not removing any essential exception intercepts. */
874#ifndef IN_NEM_DARWIN
875 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
876#else
877 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
878#endif
879 NOREF(pVCpu);
880 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
881 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
882
883 /* Remove it from the exception bitmap. */
884 u32XcptBitmap &= ~uXcptMask;
885
886 /* Commit and update the cache if necessary. */
887 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
888 {
889 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
890 AssertRC(rc);
891 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
892 }
893 }
894 }
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Removes an exception from the exception bitmap and commits it to the current
901 * VMCS.
902 *
903 * @returns VBox status code.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param pVmxTransient The VMX-transient structure.
906 * @param uXcpt The exception to remove.
907 */
908static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
909{
910 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
911}
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
914
915/**
916 * Loads the shadow VMCS specified by the VMCS info. object.
917 *
918 * @returns VBox status code.
919 * @param pVmcsInfo The VMCS info. object.
920 *
921 * @remarks Can be called with interrupts disabled.
922 */
923static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
924{
925 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
926 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
927
928 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
929 if (RT_SUCCESS(rc))
930 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
931 return rc;
932}
933
934
935/**
936 * Clears the shadow VMCS specified by the VMCS info. object.
937 *
938 * @returns VBox status code.
939 * @param pVmcsInfo The VMCS info. object.
940 *
941 * @remarks Can be called with interrupts disabled.
942 */
943static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
944{
945 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
946 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
947
948 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
949 if (RT_SUCCESS(rc))
950 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
951 return rc;
952}
953
954
955/**
956 * Switches from and to the specified VMCSes.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
960 * @param pVmcsInfoTo The VMCS info. object we are switching to.
961 *
962 * @remarks Called with interrupts disabled.
963 */
964static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
965{
966 /*
967 * Clear the VMCS we are switching out if it has not already been cleared.
968 * This will sync any CPU internal data back to the VMCS.
969 */
970 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
971 {
972 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
973 if (RT_SUCCESS(rc))
974 {
975 /*
976 * The shadow VMCS, if any, would not be active at this point since we
977 * would have cleared it while importing the virtual hardware-virtualization
978 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
979 * clear the shadow VMCS here, just assert for safety.
980 */
981 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
982 }
983 else
984 return rc;
985 }
986
987 /*
988 * Clear the VMCS we are switching to if it has not already been cleared.
989 * This will initialize the VMCS launch state to "clear" required for loading it.
990 *
991 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
992 */
993 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
994 {
995 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
996 if (RT_SUCCESS(rc))
997 { /* likely */ }
998 else
999 return rc;
1000 }
1001
1002 /*
1003 * Finally, load the VMCS we are switching to.
1004 */
1005 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1006}
1007
1008
1009/**
1010 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1011 * caller.
1012 *
1013 * @returns VBox status code.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1016 * true) or guest VMCS (pass false).
1017 */
1018static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1019{
1020 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1021 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1022
1023 PVMXVMCSINFO pVmcsInfoFrom;
1024 PVMXVMCSINFO pVmcsInfoTo;
1025 if (fSwitchToNstGstVmcs)
1026 {
1027 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1028 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1029 }
1030 else
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1034 }
1035
1036 /*
1037 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1038 * preemption hook code path acquires the current VMCS.
1039 */
1040 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1041
1042 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1043 if (RT_SUCCESS(rc))
1044 {
1045 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1046 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1047
1048 /*
1049 * If we are switching to a VMCS that was executed on a different host CPU or was
1050 * never executed before, flag that we need to export the host state before executing
1051 * guest/nested-guest code using hardware-assisted VMX.
1052 *
1053 * This could probably be done in a preemptible context since the preemption hook
1054 * will flag the necessary change in host context. However, since preemption is
1055 * already disabled and to avoid making assumptions about host specific code in
1056 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1057 * disabled.
1058 */
1059 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1060 { /* likely */ }
1061 else
1062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1063
1064 ASMSetFlags(fEFlags);
1065
1066 /*
1067 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1068 * flag that we need to update the host MSR values there. Even if we decide in the
1069 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1070 * if its content differs, we would have to update the host MSRs anyway.
1071 */
1072 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1073 }
1074 else
1075 ASMSetFlags(fEFlags);
1076 return rc;
1077}
1078
1079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1080#ifdef VBOX_STRICT
1081
1082/**
1083 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1084 * transient structure.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param pVmxTransient The VMX-transient structure.
1088 */
1089DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1090{
1091 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1092 AssertRC(rc);
1093}
1094
1095
1096/**
1097 * Reads the VM-entry exception error code field from the VMCS into
1098 * the VMX transient structure.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param pVmxTransient The VMX-transient structure.
1102 */
1103DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1104{
1105 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1106 AssertRC(rc);
1107}
1108
1109
1110/**
1111 * Reads the VM-entry instruction length field from the VMCS into
1112 * the VMX transient structure.
1113 *
1114 * @param pVCpu The cross context virtual CPU structure.
1115 * @param pVmxTransient The VMX-transient structure.
1116 */
1117DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1118{
1119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1120 AssertRC(rc);
1121}
1122
1123#endif /* VBOX_STRICT */
1124
1125
1126/**
1127 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1128 *
1129 * Don't call directly unless it's likely that some or all of the fields
1130 * given in @a a_fReadMask have already been read.
1131 *
1132 * @tparam a_fReadMask The fields to read.
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136template<uint32_t const a_fReadMask>
1137static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1138{
1139 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1140 | HMVMX_READ_EXIT_INSTR_LEN
1141 | HMVMX_READ_EXIT_INSTR_INFO
1142 | HMVMX_READ_IDT_VECTORING_INFO
1143 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1144 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1145 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1146 | HMVMX_READ_GUEST_LINEAR_ADDR
1147 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1148 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1149 )) == 0);
1150
1151 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1152 {
1153 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1154
1155 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1156 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1157 {
1158 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1159 AssertRC(rc);
1160 }
1161 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1162 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1163 {
1164 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1165 AssertRC(rc);
1166 }
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1169 {
1170 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1174 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1180 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1198 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1199 {
1200 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1204 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1205 {
1206 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1213 AssertRC(rc);
1214 }
1215
1216 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1217 }
1218}
1219
1220
1221/**
1222 * Reads VMCS fields into the VMXTRANSIENT structure.
1223 *
1224 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1225 * generating an optimized read sequence without any conditionals in
1226 * non-strict builds.
1227 *
1228 * @tparam a_fReadMask The fields to read. One or more of the
1229 * HMVMX_READ_XXX fields ORed together.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 * @param pVmxTransient The VMX-transient structure.
1232 */
1233template<uint32_t const a_fReadMask>
1234DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1235{
1236 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1237 | HMVMX_READ_EXIT_INSTR_LEN
1238 | HMVMX_READ_EXIT_INSTR_INFO
1239 | HMVMX_READ_IDT_VECTORING_INFO
1240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1241 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1242 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1243 | HMVMX_READ_GUEST_LINEAR_ADDR
1244 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1245 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1246 )) == 0);
1247
1248 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1249 {
1250 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1251 {
1252 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1253 AssertRC(rc);
1254 }
1255 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1256 {
1257 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1286 {
1287 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1296 {
1297 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1298 AssertRC(rc);
1299 }
1300
1301 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1302 }
1303 else
1304 {
1305 /** @todo add a release counter. */
1306 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1307 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1308 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1309 }
1310}
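/* Usage sketch (illustrative): VM-exit handlers instantiate this template with the
 * HMVMX_READ_XXX bits they need ORed together, e.g.:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *
 * after which pVmxTransient->uExitQual and pVmxTransient->cbExitInstr are valid,
 * with the fast path issuing the VMREADs without any runtime conditionals. */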
1311
1312
1313#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1314/**
1315 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure.
1318 * @param pVmxTransient The VMX-transient structure.
1319 */
1320static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1321{
1322 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1323 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1324 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1325 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1326 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1329 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1330 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1333 | HMVMX_READ_EXIT_INSTR_LEN
1334 | HMVMX_READ_EXIT_INSTR_INFO
1335 | HMVMX_READ_IDT_VECTORING_INFO
1336 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1337 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1338 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1339 | HMVMX_READ_GUEST_LINEAR_ADDR
1340 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1341}
1342#endif
1343
1344/**
1345 * Verifies that our cached values of the VMCS fields are all consistent with
1346 * what's actually present in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1350 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1351 * VMCS content. HMCPU error-field is
1352 * updated, see VMX_VCI_XXX.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param pVmcsInfo The VMCS info. object.
1355 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1356 */
1357static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1358{
1359 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1360
1361 uint32_t u32Val;
1362 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1363 AssertRC(rc);
1364 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1365 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1366 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1367 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1368
1369 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1372 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1379 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1386 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1391 {
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1395 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398 }
1399
1400 uint64_t u64Val;
1401 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1402 {
1403 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1406 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409 }
1410
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1414 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417
1418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1421 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 NOREF(pcszVmcs);
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1432 * VMCS.
1433 *
1434 * This is typically required when the guest changes paging mode.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 * @param pVmxTransient The VMX-transient structure.
1439 *
1440 * @remarks Requires EFER.
1441 * @remarks No-long-jump zone!!!
1442 */
1443static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1444{
1445 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1446 {
1447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1449
1450 /*
1451 * VM-entry controls.
1452 */
1453 {
1454 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1455 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1456
1457 /*
1458 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1459 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1460 *
1461 * For nested-guests, this is a mandatory VM-entry control. It's also
1462 * required because we do not want to leak host bits to the nested-guest.
1463 */
1464 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1465
1466 /*
1467 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1468 *
1469 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1470 * required to get the nested-guest working with hardware-assisted VMX execution.
1471 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1472 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1473 * here rather than while merging the guest VMCS controls.
1474 */
1475 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1476 {
1477 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1478 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1479 }
1480 else
1481 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1482
1483 /*
1484 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1485 *
1486 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1487 * regardless of whether the nested-guest VMCS specifies it because we are free to
1488 * load whatever MSRs we require and we do not need to modify the guest visible copy
1489 * of the VM-entry MSR load area.
1490 */
1491 if ( g_fHmVmxSupportsVmcsEfer
1492#ifndef IN_NEM_DARWIN
1493 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1494#endif
1495 )
1496 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1497 else
1498 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1499
1500 /*
1501 * The following should -not- be set (since we're not in SMM mode):
1502 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1503 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1504 */
1505
1506 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1507 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1508
1509 if ((fVal & fZap) == fVal)
1510 { /* likely */ }
1511 else
1512 {
1513 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1514 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1515 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1516 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1517 }
1518
1519 /* Commit it to the VMCS. */
1520 if (pVmcsInfo->u32EntryCtls != fVal)
1521 {
1522 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1523 AssertRC(rc);
1524 pVmcsInfo->u32EntryCtls = fVal;
1525 }
1526 }
1527
1528 /*
1529 * VM-exit controls.
1530 */
1531 {
1532 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1533 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1534
1535 /*
1536 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1537 * supported the 1-setting of this bit.
1538 *
1539 * For nested-guests, we set the "save debug controls" as the converse
1540 * "load debug controls" is mandatory for nested-guests anyway.
1541 */
1542 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1543
1544 /*
1545 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1546 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1547 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1548 * vmxHCExportHostMsrs().
1549 *
1550 * For nested-guests, we always set this bit as we do not support 32-bit
1551 * hosts.
1552 */
1553 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1554
1555#ifndef IN_NEM_DARWIN
1556 /*
1557 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1558 *
1559 * For nested-guests, we should use the "save IA32_EFER" control if we also
1560 * used the "load IA32_EFER" control while exporting VM-entry controls.
1561 */
1562 if ( g_fHmVmxSupportsVmcsEfer
1563 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1564 {
1565 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1566 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1567 }
1568#endif
1569
1570 /*
1571 * Enable saving of the VMX-preemption timer value on VM-exit.
1572 * For nested-guests, currently not exposed/used.
1573 */
1574 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1575 * the timer value. */
1576 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1577 {
1578 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1579 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1580 }
1581
1582 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1583 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1584
1585 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1586 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1587 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1588
1589 if ((fVal & fZap) == fVal)
1590 { /* likely */ }
1591 else
1592 {
1593 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1594 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1595 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1596 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1597 }
1598
1599 /* Commit it to the VMCS. */
1600 if (pVmcsInfo->u32ExitCtls != fVal)
1601 {
1602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1603 AssertRC(rc);
1604 pVmcsInfo->u32ExitCtls = fVal;
1605 }
1606 }
1607
1608 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1609 }
1610 return VINF_SUCCESS;
1611}
1612
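/*
 * Illustrative sketch, not part of the original file: how the allowed0/allowed1
 * halves of a VMX capability MSR (such as EntryCtls/ExitCtls above) constrain a
 * 32-bit control field. Bits set in allowed0 must be 1 in the VMCS and bits clear
 * in allowed1 must be 0; the final test mirrors the "(fVal & fZap) == fVal" check
 * used by vmxHCExportGuestEntryExitCtls(). The function name is made up for the
 * example.
 */
static bool vmxSketchIsValidCtls(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const fVal = fDesired | fAllowed0;  /* Force the must-be-one bits. */
    uint32_t const fZap = fAllowed1;             /* Only these bits may be one. */
    return (fVal & fZap) == fVal;                /* Fails if a must-be-zero bit is set. */
}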
1613
1614/**
1615 * Sets the TPR threshold in the VMCS.
1616 *
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param pVmcsInfo The VMCS info. object.
1619 * @param u32TprThreshold The TPR threshold (task-priority class only).
1620 */
1621DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1622{
1623 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1624 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1625 RT_NOREF(pVmcsInfo);
1626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1627 AssertRC(rc);
1628}
1629
1630
1631/**
1632 * Exports the guest APIC TPR state into the VMCS.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure.
1635 * @param pVmxTransient The VMX-transient structure.
1636 *
1637 * @remarks No-long-jump zone!!!
1638 */
1639static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1640{
1641 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1642 {
1643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1644
1645 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1646 if (!pVmxTransient->fIsNestedGuest)
1647 {
1648 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1649 && APICIsEnabled(pVCpu))
1650 {
1651 /*
1652 * Setup TPR shadowing.
1653 */
1654 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1655 {
1656 bool fPendingIntr = false;
1657 uint8_t u8Tpr = 0;
1658 uint8_t u8PendingIntr = 0;
1659 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1660 AssertRC(rc);
1661
1662 /*
1663 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1664 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1665 * priority of the pending interrupt so we can deliver the interrupt. If there
1666 * are no interrupts pending, set threshold to 0 to not cause any
1667 * TPR-below-threshold VM-exits.
1668 */
1669 uint32_t u32TprThreshold = 0;
1670 if (fPendingIntr)
1671 {
1672 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1673 (which is the Task-Priority Class). */
1674 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1675 const uint8_t u8TprPriority = u8Tpr >> 4;
1676 if (u8PendingPriority <= u8TprPriority)
1677 u32TprThreshold = u8PendingPriority;
1678 }
1679
1680 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1681 }
1682 }
1683 }
1684 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1685 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1686 }
1687}
1688
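/*
 * Illustrative sketch, not part of the original file: deriving the TPR threshold
 * from the guest TPR and the highest pending interrupt vector, as done by
 * vmxHCExportGuestApicTpr() above. Only bits 7:4 (the task-priority class) are
 * compared; a zero threshold means no TPR-below-threshold VM-exits. The function
 * name is made up for the example.
 */
static uint32_t vmxSketchCalcTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    if (!fPendingIntr)
        return 0;                                  /* Nothing pending, never VM-exit on TPR writes. */
    uint8_t const u8PendingPriority = u8PendingIntr >> 4;
    uint8_t const u8TprPriority     = u8Tpr >> 4;
    if (u8PendingPriority <= u8TprPriority)
        return u8PendingPriority;                  /* VM-exit once the guest lowers its TPR far enough. */
    return 0;                                      /* Interrupt is deliverable right away. */
}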
1689
1690/**
1691 * Gets the guest interruptibility-state and updates related force-flags.
1692 *
1693 * @returns Guest's interruptibility-state.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 *
1696 * @remarks No-long-jump zone!!!
1697 */
1698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1699{
1700 /*
1701 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1702 */
1703 uint32_t fIntrState = 0;
1704 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1705 {
1706 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1707 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1708
1709 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1710 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1711 {
1712 if (pCtx->eflags.Bits.u1IF)
1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1714 else
1715 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1716 }
1717 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1718 {
1719 /*
1720 * We can clear the inhibit force flag as even if we go back to the recompiler
1721 * without executing guest code in VT-x, the flag's condition to be cleared is
1722 * met and thus the cleared state is correct.
1723 */
1724 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1725 }
1726 }
1727
1728 /*
1729 * Check if we should inhibit NMI delivery.
1730 */
1731 if (CPUMIsGuestNmiBlocking(pVCpu))
1732 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1733
1734 /*
1735 * Validate.
1736 */
1737#ifdef VBOX_STRICT
1738 /* We don't support block-by-SMI yet. */
1739 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1740
1741 /* Block-by-STI must not be set when interrupts are disabled. */
1742 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1743 {
1744 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1745 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1746 }
1747#endif
1748
1749 return fIntrState;
1750}
1751
1752
1753/**
1754 * Exports the exception intercepts required for guest execution in the VMCS.
1755 *
1756 * @param pVCpu The cross context virtual CPU structure.
1757 * @param pVmxTransient The VMX-transient structure.
1758 *
1759 * @remarks No-long-jump zone!!!
1760 */
1761static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1762{
1763 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1764 {
1765 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1766 if ( !pVmxTransient->fIsNestedGuest
1767 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1768 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1769 else
1770 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1771
1772 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1773 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1774 }
1775}
1776
1777
1778/**
1779 * Exports the guest's RIP into the guest-state area in the VMCS.
1780 *
1781 * @param pVCpu The cross context virtual CPU structure.
1782 *
1783 * @remarks No-long-jump zone!!!
1784 */
1785static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1786{
1787 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1788 {
1789 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1790
1791 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1792 AssertRC(rc);
1793
1794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1795 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1796 }
1797}
1798
1799
1800/**
1801 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1802 *
1803 * @param pVCpu The cross context virtual CPU structure.
1804 * @param pVmxTransient The VMX-transient structure.
1805 *
1806 * @remarks No-long-jump zone!!!
1807 */
1808static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1809{
1810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1811 {
1812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1813
1814 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1815 Let us assert it as such and use 32-bit VMWRITE. */
1816 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1817 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1818 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1819 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1820
1821#ifndef IN_NEM_DARWIN
1822 /*
1823 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1824 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1825 * can run the real-mode guest code under Virtual 8086 mode.
1826 */
1827 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1828 if (pVmcsInfo->RealMode.fRealOnV86Active)
1829 {
1830 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1831 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1832 Assert(!pVmxTransient->fIsNestedGuest);
1833 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1834 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1835 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1836 }
1837#else
1838 RT_NOREF(pVmxTransient);
1839#endif
1840
1841 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1842 AssertRC(rc);
1843
1844 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1845 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1846 }
1847}
1848
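/*
 * Illustrative sketch, not part of the original file: the RFLAGS adjustment made
 * above for the real-on-v86 case, written with plain masks. Bit 17 is EFLAGS.VM
 * and bits 13:12 are IOPL; the literal constants and the function name are only
 * for this example.
 */
static uint32_t vmxSketchMakeV86Eflags(uint32_t fRealModeEflags)
{
    uint32_t fEFlags = fRealModeEflags;
    fEFlags |=  UINT32_C(0x00020000);   /* Set VM so VT-x runs the code in virtual-8086 mode. */
    fEFlags &= ~UINT32_C(0x00003000);   /* IOPL = 0 so privileged instructions fault as expected. */
    return fEFlags;
}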
1849
1850#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1851/**
1852 * Copies the nested-guest VMCS to the shadow VMCS.
1853 *
1854 * @returns VBox status code.
1855 * @param pVCpu The cross context virtual CPU structure.
1856 * @param pVmcsInfo The VMCS info. object.
1857 *
1858 * @remarks No-long-jump zone!!!
1859 */
1860static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1861{
1862 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1863 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1864
1865 /*
1866 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1867 * current VMCS, as we may try saving guest lazy MSRs.
1868 *
1869 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1870 * calling the VMCS import code, which currently performs the guest MSR reads
1871 * (on 64-bit hosts), accesses the auto-load/store MSR area (on 32-bit hosts)
1872 * and runs the rest of the VMX leave-session machinery.
1873 */
1874 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1875
1876 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1877 if (RT_SUCCESS(rc))
1878 {
1879 /*
1880 * Copy all guest read/write VMCS fields.
1881 *
1882 * We don't check for VMWRITE failures here for performance reasons and
1883 * because they are not expected to fail, barring irrecoverable conditions
1884 * like hardware errors.
1885 */
1886 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1887 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1888 {
1889 uint64_t u64Val;
1890 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1891 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1892 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1893 }
1894
1895 /*
1896 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1897 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1898 */
1899 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1900 {
1901 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1902 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1903 {
1904 uint64_t u64Val;
1905 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1906 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1907 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1908 }
1909 }
1910
1911 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1912 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1913 }
1914
1915 ASMSetFlags(fEFlags);
1916 return rc;
1917}
1918
1919
1920/**
1921 * Copies the shadow VMCS to the nested-guest VMCS.
1922 *
1923 * @returns VBox status code.
1924 * @param pVCpu The cross context virtual CPU structure.
1925 * @param pVmcsInfo The VMCS info. object.
1926 *
1927 * @remarks Called with interrupts disabled.
1928 */
1929static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1930{
1931 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1932 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1933 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1934
1935 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1936 if (RT_SUCCESS(rc))
1937 {
1938 /*
1939 * Copy guest read/write fields from the shadow VMCS.
1940 * Guest read-only fields cannot be modified, so no need to copy them.
1941 *
1942 * We don't check for VMREAD failures here for performance reasons and
1943 * because they are not expected to fail, barring irrecoverable conditions
1944 * like hardware errors.
1945 */
1946 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1947 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1948 {
1949 uint64_t u64Val;
1950 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1951 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1952 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1953 }
1954
1955 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1956 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1957 }
1958 return rc;
1959}
1960
1961
1962/**
1963 * Enables VMCS shadowing for the given VMCS info. object.
1964 *
1965 * @param pVCpu The cross context virtual CPU structure.
1966 * @param pVmcsInfo The VMCS info. object.
1967 *
1968 * @remarks No-long-jump zone!!!
1969 */
1970static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1971{
1972 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1973 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1974 {
1975 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1976 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1977 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1978 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1979 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1980 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1981 Log4Func(("Enabled\n"));
1982 }
1983}
1984
1985
1986/**
1987 * Disables VMCS shadowing for the given VMCS info. object.
1988 *
1989 * @param pVCpu The cross context virtual CPU structure.
1990 * @param pVmcsInfo The VMCS info. object.
1991 *
1992 * @remarks No-long-jump zone!!!
1993 */
1994static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1995{
1996 /*
1997 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1998 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1999 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2000 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2001 *
2002 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2003 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2004 */
2005 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2006 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2007 {
2008 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2009 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2010 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2011 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2012 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2013 Log4Func(("Disabled\n"));
2014 }
2015}
2016#endif
2017
2018
2019/**
2020 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2021 *
2022 * The guest FPU state is always pre-loaded hence we don't need to bother about
2023 * sharing FPU related CR0 bits between the guest and host.
2024 *
2025 * @returns VBox status code.
2026 * @param pVCpu The cross context virtual CPU structure.
2027 * @param pVmxTransient The VMX-transient structure.
2028 *
2029 * @remarks No-long-jump zone!!!
2030 */
2031static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2032{
2033 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2034 {
2035 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2036 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2037
2038 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2039 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2040 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2041 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2042 else
2043 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2044
2045 if (!pVmxTransient->fIsNestedGuest)
2046 {
2047 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2048 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2049 uint64_t const u64ShadowCr0 = u64GuestCr0;
2050 Assert(!RT_HI_U32(u64GuestCr0));
2051
2052 /*
2053 * Setup VT-x's view of the guest CR0.
2054 */
2055 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2056 if (VM_IS_VMX_NESTED_PAGING(pVM))
2057 {
2058#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2059 if (CPUMIsGuestPagingEnabled(pVCpu))
2060 {
2061 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2062 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2063 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2064 }
2065 else
2066 {
2067 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2068 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2069 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2070 }
2071
2072 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2073 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2074 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2075#endif
2076 }
2077 else
2078 {
2079 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2080 u64GuestCr0 |= X86_CR0_WP;
2081 }
2082
2083 /*
2084 * Guest FPU bits.
2085 *
2086 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2087 * using CR0.TS.
2088 *
2089 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2090 * set on the first CPUs to support VT-x; it says nothing with regards to UX in the VM-entry checks.
2091 */
2092 u64GuestCr0 |= X86_CR0_NE;
2093
2094 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2095 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2096
2097 /*
2098 * Update exception intercepts.
2099 */
2100 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2101#ifndef IN_NEM_DARWIN
2102 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2103 {
2104 Assert(PDMVmmDevHeapIsEnabled(pVM));
2105 Assert(pVM->hm.s.vmx.pRealModeTSS);
2106 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2107 }
2108 else
2109#endif
2110 {
2111 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2112 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2113 if (fInterceptMF)
2114 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2115 }
2116
2117 /* Additional intercepts for debugging; define these yourself explicitly. */
2118#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2119 uXcptBitmap |= 0
2120 | RT_BIT(X86_XCPT_BP)
2121 | RT_BIT(X86_XCPT_DE)
2122 | RT_BIT(X86_XCPT_NM)
2123 | RT_BIT(X86_XCPT_TS)
2124 | RT_BIT(X86_XCPT_UD)
2125 | RT_BIT(X86_XCPT_NP)
2126 | RT_BIT(X86_XCPT_SS)
2127 | RT_BIT(X86_XCPT_GP)
2128 | RT_BIT(X86_XCPT_PF)
2129 | RT_BIT(X86_XCPT_MF)
2130 ;
2131#elif defined(HMVMX_ALWAYS_TRAP_PF)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2133#endif
2134 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2136 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2137 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2138 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2139
2140 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2141 u64GuestCr0 |= fSetCr0;
2142 u64GuestCr0 &= fZapCr0;
2143 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177 Assert(!RT_HI_U32(u64GuestCr0));
2178 Assert(u64GuestCr0 & X86_CR0_NE);
2179
2180 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2181 u64GuestCr0 |= fSetCr0;
2182 u64GuestCr0 &= fZapCr0;
2183 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2184
2185 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2186 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2187 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2188
2189 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2190 }
2191
2192 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2193 }
2194
2195 return VINF_SUCCESS;
2196}
2197
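/*
 * Illustrative sketch, not part of the original file: applying the CR0 fixed-bit
 * MSRs to a guest CR0 value, mirroring the fSetCr0/fZapCr0 handling in
 * vmxHCExportGuestCR0() above. CD (bit 30) and NW (bit 29) are cleared to keep
 * caching enabled; the unrestricted-guest relaxation of PE/PG is left out. The
 * function name and literal bit masks are only for this example.
 */
static uint64_t vmxSketchApplyCr0FixedBits(uint64_t uGuestCr0, uint64_t fFixed0, uint64_t fFixed1)
{
    uint64_t uCr0 = uGuestCr0;
    uCr0 |= fFixed0;                                          /* Bits that must be one. */
    uCr0 &= fFixed1;                                          /* Clear bits that must be zero. */
    uCr0 &= ~(UINT64_C(0x40000000) | UINT64_C(0x20000000));   /* Clear CD and NW. */
    return uCr0;
}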
2198
2199/**
2200 * Exports the guest control registers (CR3, CR4) into the guest-state area
2201 * in the VMCS.
2202 *
2203 * @returns VBox strict status code.
2204 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2205 * without unrestricted guest access and the VMMDev is not presently
2206 * mapped (e.g. EFI32).
2207 *
2208 * @param pVCpu The cross context virtual CPU structure.
2209 * @param pVmxTransient The VMX-transient structure.
2210 *
2211 * @remarks No-long-jump zone!!!
2212 */
2213static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2214{
2215 int rc = VINF_SUCCESS;
2216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2217
2218 /*
2219 * Guest CR2.
2220 * It's always loaded in the assembler code. Nothing to do here.
2221 */
2222
2223 /*
2224 * Guest CR3.
2225 */
2226 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2227 {
2228 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2229
2230 if (VM_IS_VMX_NESTED_PAGING(pVM))
2231 {
2232#ifndef IN_NEM_DARWIN
2233 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2234 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2235
2236 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2237 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2238 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2239 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2240
2241 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2242 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2243 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2244
2245 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2246 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
2247 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2248 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2249 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2250 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2251 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252
2253 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2254 AssertRC(rc);
2255#endif
2256
2257 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2258 uint64_t u64GuestCr3 = pCtx->cr3;
2259 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2260 || CPUMIsGuestPagingEnabledEx(pCtx))
2261 {
2262 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2263 if (CPUMIsGuestInPAEModeEx(pCtx))
2264 {
2265 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2266 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2269 }
2270
2271 /*
2272 * With nested paging, the guest's view of its CR3 is left unblemished: either
2273 * the guest is using paging, or unrestricted guest execution handles the
2274 * guest when it's not using paging.
2275 */
2276 }
2277#ifndef IN_NEM_DARWIN
2278 else
2279 {
2280 /*
2281 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2282 * thinks it accesses physical memory directly, we use our identity-mapped
2283 * page table to map guest-linear to guest-physical addresses. EPT takes care
2284 * of translating it to host-physical addresses.
2285 */
2286 RTGCPHYS GCPhys;
2287 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2288
2289 /* We obtain it here every time as the guest could have relocated this PCI region. */
2290 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2291 if (RT_SUCCESS(rc))
2292 { /* likely */ }
2293 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2294 {
2295 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2296 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2297 }
2298 else
2299 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2300
2301 u64GuestCr3 = GCPhys;
2302 }
2303#endif
2304
2305 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2306 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2307 AssertRC(rc);
2308 }
2309 else
2310 {
2311 Assert(!pVmxTransient->fIsNestedGuest);
2312 /* Non-nested paging case, just use the hypervisor's CR3. */
2313 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2314
2315 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2317 AssertRC(rc);
2318 }
2319
2320 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2321 }
2322
2323 /*
2324 * Guest CR4.
2325 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2326 */
2327 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2328 {
2329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2330 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2331
2332 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2333 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2334
2335 /*
2336 * With nested-guests, we may have extended the guest/host mask here (since we
2337 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2338 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2339 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2340 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2341 */
2342 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2343 uint64_t u64GuestCr4 = pCtx->cr4;
2344 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2345 ? pCtx->cr4
2346 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2347 Assert(!RT_HI_U32(u64GuestCr4));
2348
2349#ifndef IN_NEM_DARWIN
2350 /*
2351 * Setup VT-x's view of the guest CR4.
2352 *
2353 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2354 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2355 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2356 *
2357 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2358 */
2359 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2360 {
2361 Assert(pVM->hm.s.vmx.pRealModeTSS);
2362 Assert(PDMVmmDevHeapIsEnabled(pVM));
2363 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2364 }
2365#endif
2366
2367 if (VM_IS_VMX_NESTED_PAGING(pVM))
2368 {
2369 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2370 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2371 {
2372 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2373 u64GuestCr4 |= X86_CR4_PSE;
2374 /* Our identity mapping is a 32-bit page directory. */
2375 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2376 }
2377 /* else use guest CR4.*/
2378 }
2379 else
2380 {
2381 Assert(!pVmxTransient->fIsNestedGuest);
2382
2383 /*
2384 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2385 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2386 */
2387 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2388 {
2389 case PGMMODE_REAL: /* Real-mode. */
2390 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2391 case PGMMODE_32_BIT: /* 32-bit paging. */
2392 {
2393 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2394 break;
2395 }
2396
2397 case PGMMODE_PAE: /* PAE paging. */
2398 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2399 {
2400 u64GuestCr4 |= X86_CR4_PAE;
2401 break;
2402 }
2403
2404 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2405 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2406 {
2407#ifdef VBOX_WITH_64_BITS_GUESTS
2408 /* For our assumption in vmxHCShouldSwapEferMsr. */
2409 Assert(u64GuestCr4 & X86_CR4_PAE);
2410 break;
2411#endif
2412 }
2413 default:
2414 AssertFailed();
2415 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2416 }
2417 }
2418
2419 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2420 u64GuestCr4 |= fSetCr4;
2421 u64GuestCr4 &= fZapCr4;
2422
2423 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2424 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2425 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2426
2427#ifndef IN_NEM_DARWIN
2428 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2429 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2430 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2431 {
2432 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2433 hmR0VmxUpdateStartVmFunction(pVCpu);
2434 }
2435#endif
2436
2437 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2438
2439 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2440 }
2441 return rc;
2442}
2443
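/*
 * Illustrative sketch, not part of the original file: composing an EPTP value from
 * a 4KB-aligned PML4 table address, matching the layout asserted in
 * vmxHCExportGuestCR3AndCR4() above. Bits 2:0 hold the memory type (6 = write-back),
 * bits 5:3 hold the page-walk length minus one (3 for a 4-level walk) and bits 11:6
 * stay zero here (no accessed/dirty tracking). The function name is only for this
 * example.
 */
static uint64_t vmxSketchMakeEptp(uint64_t HCPhysPml4)
{
    uint64_t uEptp = HCPhysPml4 & ~UINT64_C(0xfff);  /* 4KB-aligned physical address of the PML4 table. */
    uEptp |= UINT64_C(6);                            /* Memory type: write-back. */
    uEptp |= UINT64_C(3) << 3;                       /* EPT page-walk length - 1 = 3. */
    return uEptp;
}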
2444
2445#ifdef VBOX_STRICT
2446/**
2447 * Strict function to validate segment registers.
2448 *
2449 * @param pVCpu The cross context virtual CPU structure.
2450 * @param pVmcsInfo The VMCS info. object.
2451 *
2452 * @remarks Will import guest CR0 on strict builds during validation of
2453 * segments.
2454 */
2455static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2456{
2457 /*
2458 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2459 *
2460 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2461 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2462 * unusable bit and doesn't change the guest-context value.
2463 */
2464 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2465 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2466 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2467 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2468 && ( !CPUMIsGuestInRealModeEx(pCtx)
2469 && !CPUMIsGuestInV86ModeEx(pCtx)))
2470 {
2471 /* Protected mode checks */
2472 /* CS */
2473 Assert(pCtx->cs.Attr.n.u1Present);
2474 Assert(!(pCtx->cs.Attr.u & 0xf00));
2475 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2476 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2477 || !(pCtx->cs.Attr.n.u1Granularity));
2478 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2479 || (pCtx->cs.Attr.n.u1Granularity));
2480 /* CS cannot be loaded with NULL in protected mode. */
2481 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2482 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2483 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2484 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2485 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2486 else
2487 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u2Dpl));
2488 /* SS */
2489 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2490 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2491 if ( !(pCtx->cr0 & X86_CR0_PE)
2492 || pCtx->cs.Attr.n.u4Type == 3)
2493 {
2494 Assert(!pCtx->ss.Attr.n.u2Dpl);
2495 }
2496 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2497 {
2498 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2499 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2500 Assert(pCtx->ss.Attr.n.u1Present);
2501 Assert(!(pCtx->ss.Attr.u & 0xf00));
2502 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2503 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2504 || !(pCtx->ss.Attr.n.u1Granularity));
2505 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2506 || (pCtx->ss.Attr.n.u1Granularity));
2507 }
2508 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2509 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2510 {
2511 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2512 Assert(pCtx->ds.Attr.n.u1Present);
2513 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2514 Assert(!(pCtx->ds.Attr.u & 0xf00));
2515 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2516 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2517 || !(pCtx->ds.Attr.n.u1Granularity));
2518 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2519 || (pCtx->ds.Attr.n.u1Granularity));
2520 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2521 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2522 }
2523 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2524 {
2525 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2526 Assert(pCtx->es.Attr.n.u1Present);
2527 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2528 Assert(!(pCtx->es.Attr.u & 0xf00));
2529 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2530 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2531 || !(pCtx->es.Attr.n.u1Granularity));
2532 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2533 || (pCtx->es.Attr.n.u1Granularity));
2534 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2535 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2536 }
2537 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2538 {
2539 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2540 Assert(pCtx->fs.Attr.n.u1Present);
2541 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2542 Assert(!(pCtx->fs.Attr.u & 0xf00));
2543 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2544 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2545 || !(pCtx->fs.Attr.n.u1Granularity));
2546 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2547 || (pCtx->fs.Attr.n.u1Granularity));
2548 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2549 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2550 }
2551 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2552 {
2553 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2554 Assert(pCtx->gs.Attr.n.u1Present);
2555 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2556 Assert(!(pCtx->gs.Attr.u & 0xf00));
2557 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2558 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2559 || !(pCtx->gs.Attr.n.u1Granularity));
2560 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2561 || (pCtx->gs.Attr.n.u1Granularity));
2562 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2563 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2564 }
2565 /* 64-bit capable CPUs. */
2566 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2567 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2568 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2569 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2570 }
2571 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2572 || ( CPUMIsGuestInRealModeEx(pCtx)
2573 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2574 {
2575 /* Real and v86 mode checks. */
2576 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2577 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2578#ifndef IN_NEM_DARWIN
2579 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2580 {
2581 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2582 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2583 }
2584 else
2585#endif
2586 {
2587 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2588 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2589 }
2590
2591 /* CS */
2592 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel=%#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2593 Assert(pCtx->cs.u32Limit == 0xffff);
2594 Assert(u32CSAttr == 0xf3);
2595 /* SS */
2596 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2597 Assert(pCtx->ss.u32Limit == 0xffff);
2598 Assert(u32SSAttr == 0xf3);
2599 /* DS */
2600 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2601 Assert(pCtx->ds.u32Limit == 0xffff);
2602 Assert(u32DSAttr == 0xf3);
2603 /* ES */
2604 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2605 Assert(pCtx->es.u32Limit == 0xffff);
2606 Assert(u32ESAttr == 0xf3);
2607 /* FS */
2608 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2609 Assert(pCtx->fs.u32Limit == 0xffff);
2610 Assert(u32FSAttr == 0xf3);
2611 /* GS */
2612 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2613 Assert(pCtx->gs.u32Limit == 0xffff);
2614 Assert(u32GSAttr == 0xf3);
2615 /* 64-bit capable CPUs. */
2616 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2617 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2618 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2619 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2620 }
2621}
2622#endif /* VBOX_STRICT */
2623
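/*
 * Illustrative sketch, not part of the original file: the real-mode/v86 segment
 * shape that vmxHCValidateSegmentRegs() asserts above -- the base is the selector
 * shifted left by four, the limit is 64KB and the access rights are 0xf3 (present,
 * DPL 3, accessed read/write data). The function name is only for this example.
 */
static bool vmxSketchIsRealModeSegShape(uint16_t uSel, uint64_t u64Base, uint32_t u32Limit, uint32_t u32Attr)
{
    return u64Base  == (uint64_t)uSel << 4
        && u32Limit == UINT32_C(0xffff)
        && u32Attr  == UINT32_C(0xf3);
}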
2624
2625/**
2626 * Exports a guest segment register into the guest-state area in the VMCS.
2627 *
2628 * @returns VBox status code.
2629 * @param pVCpu The cross context virtual CPU structure.
2630 * @param pVmcsInfo The VMCS info. object.
2631 * @param iSegReg The segment register number (X86_SREG_XXX).
2632 * @param pSelReg Pointer to the segment selector.
2633 *
2634 * @remarks No-long-jump zone!!!
2635 */
2636static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2637{
2638 Assert(iSegReg < X86_SREG_COUNT);
2639
2640 uint32_t u32Access = pSelReg->Attr.u;
2641#ifndef IN_NEM_DARWIN
2642 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2643#endif
2644 {
2645 /*
2646 * The way to differentiate whether this is really a null selector or just a
2647 * selector loaded with 0 in real-mode is by using the segment attributes. A selector
2648 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2649 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2650 * that NULL selectors loaded in protected-mode have their attributes set to 0.
2651 */
2652 if (u32Access)
2653 { }
2654 else
2655 u32Access = X86DESCATTR_UNUSABLE;
2656 }
2657#ifndef IN_NEM_DARWIN
2658 else
2659 {
2660 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2661 u32Access = 0xf3;
2662 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2663 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2664 RT_NOREF_PV(pVCpu);
2665 }
2666#else
2667 RT_NOREF(pVmcsInfo);
2668#endif
2669
2670 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2671 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2672 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2673
2674 /*
2675 * Commit it to the VMCS.
2676 */
2677 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2678 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2679 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2680 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2681 return VINF_SUCCESS;
2682}
2683
2684
2685/**
2686 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2687 * area in the VMCS.
2688 *
2689 * @returns VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure.
2691 * @param pVmxTransient The VMX-transient structure.
2692 *
2693 * @remarks Will import guest CR0 on strict builds during validation of
2694 * segments.
2695 * @remarks No-long-jump zone!!!
2696 */
2697static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2698{
2699 int rc = VERR_INTERNAL_ERROR_5;
2700#ifndef IN_NEM_DARWIN
2701 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2702#endif
2703 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2704 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2705#ifndef IN_NEM_DARWIN
2706 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2707#endif
2708
2709 /*
2710 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2711 */
2712 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2713 {
2714 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2715 {
2716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2717#ifndef IN_NEM_DARWIN
2718 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2719 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2720#endif
2721 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2722 AssertRC(rc);
2723 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2724 }
2725
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2727 {
2728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2729#ifndef IN_NEM_DARWIN
2730 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2731 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2732#endif
2733 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2734 AssertRC(rc);
2735 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2736 }
2737
2738 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2739 {
2740 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2741#ifndef IN_NEM_DARWIN
2742 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2743 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2744#endif
2745 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2746 AssertRC(rc);
2747 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2748 }
2749
2750 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2751 {
2752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2753#ifndef IN_NEM_DARWIN
2754 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2755 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2756#endif
2757 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2758 AssertRC(rc);
2759 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2760 }
2761
2762 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2763 {
2764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2765#ifndef IN_NEM_DARWIN
2766 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2767 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2768#endif
2769 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2770 AssertRC(rc);
2771 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2772 }
2773
2774 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2775 {
2776 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2777#ifndef IN_NEM_DARWIN
2778 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2779 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2780#endif
2781 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2782 AssertRC(rc);
2783 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2784 }
2785
2786#ifdef VBOX_STRICT
2787 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2788#endif
2789 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2790 pCtx->cs.Attr.u));
2791 }
2792
2793 /*
2794 * Guest TR.
2795 */
2796 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2797 {
2798 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2799
2800 /*
2801 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2802 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2803 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2804 */
2805 uint16_t u16Sel;
2806 uint32_t u32Limit;
2807 uint64_t u64Base;
2808 uint32_t u32AccessRights;
2809#ifndef IN_NEM_DARWIN
2810 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2811#endif
2812 {
2813 u16Sel = pCtx->tr.Sel;
2814 u32Limit = pCtx->tr.u32Limit;
2815 u64Base = pCtx->tr.u64Base;
2816 u32AccessRights = pCtx->tr.Attr.u;
2817 }
2818#ifndef IN_NEM_DARWIN
2819 else
2820 {
2821 Assert(!pVmxTransient->fIsNestedGuest);
2822 Assert(pVM->hm.s.vmx.pRealModeTSS);
2823 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2824
2825 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2826 RTGCPHYS GCPhys;
2827 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2828 AssertRCReturn(rc, rc);
2829
2830 X86DESCATTR DescAttr;
2831 DescAttr.u = 0;
2832 DescAttr.n.u1Present = 1;
2833 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2834
2835 u16Sel = 0;
2836 u32Limit = HM_VTX_TSS_SIZE;
2837 u64Base = GCPhys;
2838 u32AccessRights = DescAttr.u;
2839 }
2840#endif
2841
2842 /* Validate. */
2843 Assert(!(u16Sel & RT_BIT(2)));
2844 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2845 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2846 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2847 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2848 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2849 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2850 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2851 Assert( (u32Limit & 0xfff) == 0xfff
2852 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2853 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2854 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2855
2856 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2857 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2858 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2859 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2860
2861 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2862 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2863 }
2864
2865 /*
2866 * Guest GDTR.
2867 */
2868 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2869 {
2870 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2871
2872 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2873 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2874
2875 /* Validate. */
2876 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2877
2878 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2879 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2880 }
2881
2882 /*
2883 * Guest LDTR.
2884 */
2885 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2886 {
2887 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2888
2889 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2890 uint32_t u32Access;
2891 if ( !pVmxTransient->fIsNestedGuest
2892 && !pCtx->ldtr.Attr.u)
2893 u32Access = X86DESCATTR_UNUSABLE;
2894 else
2895 u32Access = pCtx->ldtr.Attr.u;
2896
2897 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2898 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2899 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2900 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2901
2902 /* Validate. */
2903 if (!(u32Access & X86DESCATTR_UNUSABLE))
2904 {
2905 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2906 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2907 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2908 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2909 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2910 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2911 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2912 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2913 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2914 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2915 }
2916
2917 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2918 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2919 }
2920
2921 /*
2922 * Guest IDTR.
2923 */
2924 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2925 {
2926 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2927
2928 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2929 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2930
2931 /* Validate. */
2932 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2933
2934 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2935 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2936 }
2937
2938 return VINF_SUCCESS;
2939}
2940
2941
2942/**
2943 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2944 * VM-exit interruption info type.
2945 *
2946 * @returns The IEM exception flags.
2947 * @param uVector The event vector.
2948 * @param uVmxEventType The VMX event type.
2949 *
2950 * @remarks This function currently only constructs flags required for
2951 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2952 * and CR2 aspects of an exception are not included).
2953 */
2954static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2955{
2956 uint32_t fIemXcptFlags;
2957 switch (uVmxEventType)
2958 {
2959 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2960 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2961 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2962 break;
2963
2964 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2965 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2966 break;
2967
2968 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2969 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2970 break;
2971
2972 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2973 {
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2975 if (uVector == X86_XCPT_BP)
2976 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2977 else if (uVector == X86_XCPT_OF)
2978 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2979 else
2980 {
2981 fIemXcptFlags = 0;
2982 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2983 }
2984 break;
2985 }
2986
2987 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2988 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2989 break;
2990
2991 default:
2992 fIemXcptFlags = 0;
2993 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2994 break;
2995 }
2996 return fIemXcptFlags;
2997}
2998
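/*
 * A minimal usage sketch (guarded out of the build, illustrative only): a software
 * breakpoint (#BP raised via INT3) recorded in the IDT-vectoring info maps to the
 * soft-int and BP-instruction IEM flags.
 */
#if 0
{
    uint32_t const fXcptFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
    Assert(fXcptFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
}
#endif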
2999
3000/**
3001 * Sets an event as a pending event to be injected into the guest.
3002 *
3003 * @param pVCpu The cross context virtual CPU structure.
3004 * @param u32IntInfo The VM-entry interruption-information field.
3005 * @param cbInstr The VM-entry instruction length in bytes (for
3006 * software interrupts, exceptions and privileged
3007 * software exceptions).
3008 * @param u32ErrCode The VM-entry exception error code.
3009 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3010 * page-fault.
3011 */
3012DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3013 RTGCUINTPTR GCPtrFaultAddress)
3014{
3015 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3016 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3017 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3018 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3019 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3020 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3021}
3022
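/*
 * Illustrative sketch only (guarded out of the build): queueing a guest #PF with an
 * error code and fault address via vmxHCSetPendingEvent, mirroring the pattern of the
 * helpers that follow. pVCpu, uErrCode and GCPtrFaultAddress are assumed to be in scope.
 */
#if 0
{
    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, uErrCode, GCPtrFaultAddress /* becomes CR2 on injection */);
}
#endif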
3023
3024/**
3025 * Sets an external interrupt as pending-for-injection into the VM.
3026 *
3027 * @param pVCpu The cross context virtual CPU structure.
3028 * @param u8Interrupt The external interrupt vector.
3029 */
3030DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3031{
3032 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3033 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3034 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3035 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3036 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3037}
3038
3039
3040/**
3041 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3042 *
3043 * @param pVCpu The cross context virtual CPU structure.
3044 */
3045DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3046{
3047 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3048 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3049 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3050 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3051 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3052}
3053
3054
3055/**
3056 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3057 *
3058 * @param pVCpu The cross context virtual CPU structure.
3059 */
3060DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3061{
3062 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3063 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3064 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3066 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3067}
3068
3069
3070/**
3071 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3072 *
3073 * @param pVCpu The cross context virtual CPU structure.
3074 */
3075DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3076{
3077 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3078 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3079 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3081 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3082}
3083
3084
3085/**
3086 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3087 *
3088 * @param pVCpu The cross context virtual CPU structure.
3089 */
3090DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3091{
3092 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3093 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3094 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3096 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3097}
3098
3099
3100#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3101/**
3102 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3103 *
3104 * @param pVCpu The cross context virtual CPU structure.
3105 * @param u32ErrCode The error code for the general-protection exception.
3106 */
3107DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3108{
3109 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3112 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3113 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3114}
3115
3116
3117/**
3118 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3119 *
3120 * @param pVCpu The cross context virtual CPU structure.
3121 * @param u32ErrCode The error code for the stack exception.
3122 */
3123DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3124{
3125 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3129 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3130}
3131#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3132
3133
3134/**
3135 * Fixes up attributes for the specified segment register.
3136 *
3137 * @param pVCpu The cross context virtual CPU structure.
3138 * @param pSelReg The segment register that needs fixing.
3139 * @param pszRegName The register name (for logging and assertions).
3140 */
3141static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3142{
3143 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3144
3145 /*
3146 * If VT-x marks the segment as unusable, most other bits remain undefined:
3147 * - For CS the L, D and G bits have meaning.
3148 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3149 * - For the remaining data segments no bits are defined.
3150 *
3151 * The present bit and the unusable bit have been observed to be set at the
3152 * same time (the selector was supposed to be invalid as we started executing
3153 * a V8086 interrupt in ring-0).
3154 *
3155 * What should be important for the rest of the VBox code is that the P bit is
3156 * cleared. Some of the other VBox code recognizes the unusable bit, but
3157 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3158 * safe side here, we'll strip off P and other bits we don't care about. If
3159 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3160 *
3161 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3162 */
3163#ifdef VBOX_STRICT
3164 uint32_t const uAttr = pSelReg->Attr.u;
3165#endif
3166
3167 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3168 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3169 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3170
3171#ifdef VBOX_STRICT
3172# ifndef IN_NEM_DARWIN
3173 VMMRZCallRing3Disable(pVCpu);
3174# endif
3175 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3176# ifdef DEBUG_bird
3177 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3178 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3179 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3180# endif
3181# ifndef IN_NEM_DARWIN
3182 VMMRZCallRing3Enable(pVCpu);
3183# endif
3184 NOREF(uAttr);
3185#endif
3186 RT_NOREF2(pVCpu, pszRegName);
3187}
3188
3189
3190/**
3191 * Imports a guest segment register from the current VMCS into the guest-CPU
3192 * context.
3193 *
3194 * @param pVCpu The cross context virtual CPU structure.
3195 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3196 *
3197 * @remarks Called with interrupts and/or preemption disabled.
3198 */
3199template<uint32_t const a_iSegReg>
3200DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3201{
3202 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3203 /* Check that the macros we depend upon here and in the exporting parent function work: */
3204#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3205 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3206 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3207 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3208 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3209 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3210 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3211 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3212 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3213 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3214 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3215
3216 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3217
3218 uint16_t u16Sel;
3219 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3220 pSelReg->Sel = u16Sel;
3221 pSelReg->ValidSel = u16Sel;
3222
3223 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3224 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3225
3226 uint32_t u32Attr;
3227 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3228 pSelReg->Attr.u = u32Attr;
3229 if (u32Attr & X86DESCATTR_UNUSABLE)
3230 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3231
3232 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3233}
3234
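/*
 * Usage sketch (guarded out of the build): the segment index is a template parameter,
 * so the VMCS field IDs resolve at compile time; importing CS and SS for a vCPU:
 */
#if 0
vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
#endif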
3235
3236/**
3237 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3238 *
3239 * @param pVCpu The cross context virtual CPU structure.
3240 *
3241 * @remarks Called with interrupts and/or preemption disabled.
3242 */
3243DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3244{
3245 uint16_t u16Sel;
3246 uint64_t u64Base;
3247 uint32_t u32Limit, u32Attr;
3248 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3251 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3252
3253 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3254 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3255 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3257 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3258 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3259 if (u32Attr & X86DESCATTR_UNUSABLE)
3260 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3261}
3262
3263
3264/**
3265 * Imports the guest TR from the current VMCS into the guest-CPU context.
3266 *
3267 * @param pVCpu The cross context virtual CPU structure.
3268 *
3269 * @remarks Called with interrupts and/or preemption disabled.
3270 */
3271DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3272{
3273 uint16_t u16Sel;
3274 uint64_t u64Base;
3275 uint32_t u32Limit, u32Attr;
3276 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3277 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3278 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3279 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3280
3281 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3282 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3283 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3284 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3285 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3286 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3287 /* TR is the only selector that can never be unusable. */
3288 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3289}
3290
3291
3292/**
3293 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3294 *
3295 * @param pVCpu The cross context virtual CPU structure.
3296 *
3297 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3298 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3299 * instead!!!
3300 */
3301DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3302{
3303 uint64_t u64Val;
3304 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3305 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3306 {
3307 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3308 AssertRC(rc);
3309
3310 pCtx->rip = u64Val;
3311 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3312 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3313 }
3314}
3315
3316
3317/**
3318 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3319 *
3320 * @param pVCpu The cross context virtual CPU structure.
3321 * @param pVmcsInfo The VMCS info. object.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3328{
3329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3330 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3331 {
3332 uint64_t u64Val;
3333 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3334 AssertRC(rc);
3335
3336 pCtx->rflags.u64 = u64Val;
3337#ifndef IN_NEM_DARWIN
3338 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3339 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3340 {
3341 pCtx->eflags.Bits.u1VM = 0;
3342 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3343 }
3344#else
3345 RT_NOREF(pVmcsInfo);
3346#endif
3347 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3348 }
3349}
3350
3351
3352/**
3353 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3354 * context.
3355 *
3356 * @param pVCpu The cross context virtual CPU structure.
3357 * @param pVmcsInfo The VMCS info. object.
3358 *
3359 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3360 * do not log!
3361 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3362 * instead!!!
3363 */
3364DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3365{
3366 uint32_t u32Val;
3367 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3368 if (!u32Val)
3369 {
3370 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3371 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3372 CPUMSetGuestNmiBlocking(pVCpu, false);
3373 }
3374 else
3375 {
3376 /*
3377 * We must import RIP here to set our EM interrupt-inhibited state.
3378 * We also import RFLAGS as our code that evaluates pending interrupts
3379 * before VM-entry requires it.
3380 */
3381 vmxHCImportGuestRip(pVCpu);
3382 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3383
3384 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3385 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3386 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3387 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3388
3389 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3390 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3391 }
3392}
3393
3394
3395/**
3396 * Worker for VMXR0ImportStateOnDemand.
3397 *
3398 * @returns VBox status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param pVmcsInfo The VMCS info. object.
3401 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3402 */
3403static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3404{
3405 int rc = VINF_SUCCESS;
3406 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3407 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3408 uint32_t u32Val;
3409
3410 /*
3411 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3412 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3413 * neither are other host platforms.
3414 *
3415 * Committing this temporarily as it prevents BSOD.
3416 *
3417 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3418 */
3419#ifdef RT_OS_WINDOWS
3420 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3421 return VERR_HM_IPE_1;
3422#endif
3423
3424 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3425
3426#ifndef IN_NEM_DARWIN
3427 /*
3428 * We disable interrupts to make the updating of the state and in particular
3429 * the fExtrn modification atomic wrt to preemption hooks.
3430 */
3431 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3432#endif
3433
3434 fWhat &= pCtx->fExtrn;
3435 if (fWhat)
3436 {
3437 do
3438 {
3439 if (fWhat & CPUMCTX_EXTRN_RIP)
3440 vmxHCImportGuestRip(pVCpu);
3441
3442 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3443 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3444
3445 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3446 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3447
3448 if (fWhat & CPUMCTX_EXTRN_RSP)
3449 {
3450 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3451 AssertRC(rc);
3452 }
3453
3454 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3455 {
3456 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3457#ifndef IN_NEM_DARWIN
3458 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3459#else
3460 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3461#endif
3462 if (fWhat & CPUMCTX_EXTRN_CS)
3463 {
3464 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3465 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3466 if (fRealOnV86Active)
3467 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3468 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3469 }
3470 if (fWhat & CPUMCTX_EXTRN_SS)
3471 {
3472 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3473 if (fRealOnV86Active)
3474 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3475 }
3476 if (fWhat & CPUMCTX_EXTRN_DS)
3477 {
3478 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3479 if (fRealOnV86Active)
3480 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3481 }
3482 if (fWhat & CPUMCTX_EXTRN_ES)
3483 {
3484 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3485 if (fRealOnV86Active)
3486 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3487 }
3488 if (fWhat & CPUMCTX_EXTRN_FS)
3489 {
3490 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3491 if (fRealOnV86Active)
3492 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3493 }
3494 if (fWhat & CPUMCTX_EXTRN_GS)
3495 {
3496 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3497 if (fRealOnV86Active)
3498 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3499 }
3500 }
3501
3502 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3503 {
3504 if (fWhat & CPUMCTX_EXTRN_LDTR)
3505 vmxHCImportGuestLdtr(pVCpu);
3506
3507 if (fWhat & CPUMCTX_EXTRN_GDTR)
3508 {
3509 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3510 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3511 pCtx->gdtr.cbGdt = u32Val;
3512 }
3513
3514 /* Guest IDTR. */
3515 if (fWhat & CPUMCTX_EXTRN_IDTR)
3516 {
3517 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3518 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3519 pCtx->idtr.cbIdt = u32Val;
3520 }
3521
3522 /* Guest TR. */
3523 if (fWhat & CPUMCTX_EXTRN_TR)
3524 {
3525#ifndef IN_NEM_DARWIN
3526 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3527 so we don't need to import that one. */
3528 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3529#endif
3530 vmxHCImportGuestTr(pVCpu);
3531 }
3532 }
3533
3534 if (fWhat & CPUMCTX_EXTRN_DR7)
3535 {
3536#ifndef IN_NEM_DARWIN
3537 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3538#endif
3539 {
3540 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3541 AssertRC(rc);
3542 }
3543 }
3544
3545 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3546 {
3547 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3548 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3549 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3550 pCtx->SysEnter.cs = u32Val;
3551 }
3552
3553#ifndef IN_NEM_DARWIN
3554 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3555 {
3556 if ( pVM->hmr0.s.fAllow64BitGuests
3557 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3558 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3559 }
3560
3561 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3562 {
3563 if ( pVM->hmr0.s.fAllow64BitGuests
3564 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3565 {
3566 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3567 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3568 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3569 }
3570 }
3571
3572 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3573 {
3574 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3575 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3576 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3577 Assert(pMsrs);
3578 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3579 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3580 for (uint32_t i = 0; i < cMsrs; i++)
3581 {
3582 uint32_t const idMsr = pMsrs[i].u32Msr;
3583 switch (idMsr)
3584 {
3585 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3586 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3587 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3588 default:
3589 {
3590 uint32_t idxLbrMsr;
3591 if (VM_IS_VMX_LBR(pVM))
3592 {
3593 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3594 {
3595 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3596 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3597 break;
3598 }
3599 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3600 {
3601 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3602 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3603 break;
3604 }
3605 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3606 {
3607 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3608 break;
3609 }
3610 /* Fallthru (no break) */
3611 }
3612 pCtx->fExtrn = 0;
3613 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3614 ASMSetFlags(fEFlags);
3615 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3616 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3617 }
3618 }
3619 }
3620 }
3621#endif
3622
3623 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3624 {
3625 if (fWhat & CPUMCTX_EXTRN_CR0)
3626 {
3627 uint64_t u64Cr0;
3628 uint64_t u64Shadow;
3629 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3630 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3631#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3632 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3633 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3634#else
3635 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3636 {
3637 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3638 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3639 }
3640 else
3641 {
3642 /*
3643 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3644 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3645 * re-construct CR0. See @bugref{9180#c95} for details.
3646 */
3647 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3648 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3649 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3650 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3651 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3652 }
3653#endif
3654#ifndef IN_NEM_DARWIN
3655 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3656#endif
3657 CPUMSetGuestCR0(pVCpu, u64Cr0);
3658#ifndef IN_NEM_DARWIN
3659 VMMRZCallRing3Enable(pVCpu);
3660#endif
3661 }
3662
3663 if (fWhat & CPUMCTX_EXTRN_CR4)
3664 {
3665 uint64_t u64Cr4;
3666 uint64_t u64Shadow;
3667 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3668 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3669#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3670 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3671 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3672#else
3673 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3674 {
3675 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3676 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3677 }
3678 else
3679 {
3680 /*
3681 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3682 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3683 * re-construct CR4. See @bugref{9180#c95} for details.
3684 */
3685 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3686 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3687 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3688 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3689 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3690 }
3691#endif
3692 pCtx->cr4 = u64Cr4;
3693 }
3694
3695 if (fWhat & CPUMCTX_EXTRN_CR3)
3696 {
3697 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3698 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3699 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3700 && CPUMIsGuestPagingEnabledEx(pCtx)))
3701 {
3702 uint64_t u64Cr3;
3703 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3704 if (pCtx->cr3 != u64Cr3)
3705 {
3706 pCtx->cr3 = u64Cr3;
3707 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3708 }
3709
3710 /*
3711 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3712 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3713 */
3714 if (CPUMIsGuestInPAEModeEx(pCtx))
3715 {
3716 X86PDPE aPaePdpes[4];
3717 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3718 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3719 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3720 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3721 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3722 {
3723 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3724 /* PGM now updates PAE PDPTEs while updating CR3. */
3725 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3726 }
3727 }
3728 }
3729 }
3730 }
3731
3732#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3733 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3734 {
3735 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3736 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3737 {
3738 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3739 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3740 if (RT_SUCCESS(rc))
3741 { /* likely */ }
3742 else
3743 break;
3744 }
3745 }
3746#endif
3747 } while (0);
3748
3749 if (RT_SUCCESS(rc))
3750 {
3751 /* Update fExtrn. */
3752 pCtx->fExtrn &= ~fWhat;
3753
3754 /* If everything has been imported, clear the HM keeper bit. */
3755 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3756 {
3757#ifndef IN_NEM_DARWIN
3758 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3759#else
3760 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3761#endif
3762 Assert(!pCtx->fExtrn);
3763 }
3764 }
3765 }
3766#ifndef IN_NEM_DARWIN
3767 else
3768 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3769
3770 /*
3771 * Restore interrupts.
3772 */
3773 ASMSetFlags(fEFlags);
3774#endif
3775
3776 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3777
3778 if (RT_SUCCESS(rc))
3779 { /* likely */ }
3780 else
3781 return rc;
3782
3783 /*
3784 * Honor any pending CR3 updates.
3785 *
3786 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3787 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3788 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3789 *
3790 * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
3791 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3792 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3793 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3794 *
3795 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3796 *
3797 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3798 */
3799 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3800#ifndef IN_NEM_DARWIN
3801 && VMMRZCallRing3IsEnabled(pVCpu)
3802#endif
3803 )
3804 {
3805 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3806 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3807 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3808 }
3809
3810 return VINF_SUCCESS;
3811}
3812
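/*
 * Usage sketch (guarded out of the build): a VM-exit handler importing only the bits
 * of guest state it needs, here RIP and RFLAGS, leaving the rest lazily in the VMCS.
 * pVCpu and pVmcsInfo are assumed to be in scope.
 */
#if 0
{
    int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    AssertRCReturn(rc, rc);
}
#endif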
3813
3814/**
3815 * Check per-VM and per-VCPU force flag actions that require us to go back to
3816 * ring-3 for one reason or another.
3817 *
3818 * @returns Strict VBox status code (i.e. informational status codes too)
3819 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3820 * ring-3.
3821 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3822 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3823 * interrupts)
3824 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3825 * all EMTs to be in ring-3.
3826 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
3827 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3828 * to the EM loop.
3829 *
3830 * @param pVCpu The cross context virtual CPU structure.
3831 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
3832 * @param fStepping Whether we are single-stepping the guest using the
3833 * hypervisor debugger.
3834 *
3835 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3836 * is no longer in VMX non-root mode.
3837 */
3838static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3839{
3840#ifndef IN_NEM_DARWIN
3841 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3842#endif
3843
3844 /*
3845 * Update pending interrupts into the APIC's IRR.
3846 */
3847 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3848 APICUpdatePendingInterrupts(pVCpu);
3849
3850 /*
3851 * Anything pending? Should be more likely than not if we're doing a good job.
3852 */
3853 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3854 if ( !fStepping
3855 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3856 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3857 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3858 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3859 return VINF_SUCCESS;
3860
3861 /* Pending PGM CR3 sync. */
3862 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3863 {
3864 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3865 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3866 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3867 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3868 if (rcStrict != VINF_SUCCESS)
3869 {
3870 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3871 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3872 return rcStrict;
3873 }
3874 }
3875
3876 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3877 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3878 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3879 {
3880 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3881 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3882 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
3883 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
3884 return rc;
3885 }
3886
3887 /* Pending VM request packets, such as hardware interrupts. */
3888 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3889 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3890 {
3891 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3892 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3893 return VINF_EM_PENDING_REQUEST;
3894 }
3895
3896 /* Pending PGM pool flushes. */
3897 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3898 {
3899 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3900 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3901 return VINF_PGM_POOL_FLUSH_PENDING;
3902 }
3903
3904 /* Pending DMA requests. */
3905 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3906 {
3907 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3908 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3909 return VINF_EM_RAW_TO_R3;
3910 }
3911
3912#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3913 /*
3914 * Pending nested-guest events.
3915 *
3916 * Please note that the priority of these events is specified and important.
3917 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3918 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3919 */
3920 if (fIsNestedGuest)
3921 {
3922 /* Pending nested-guest APIC-write. */
3923 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3924 {
3925 Log4Func(("Pending nested-guest APIC-write\n"));
3926 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3927 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3928 return rcStrict;
3929 }
3930
3931 /* Pending nested-guest monitor-trap flag (MTF). */
3932 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3933 {
3934 Log4Func(("Pending nested-guest MTF\n"));
3935 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3936 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3937 return rcStrict;
3938 }
3939
3940 /* Pending nested-guest VMX-preemption timer expired. */
3941 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3942 {
3943 Log4Func(("Pending nested-guest preempt timer\n"));
3944 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3945 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3946 return rcStrict;
3947 }
3948 }
3949#else
3950 NOREF(fIsNestedGuest);
3951#endif
3952
3953 return VINF_SUCCESS;
3954}
3955
3956
3957/**
3958 * Converts any TRPM trap into a pending HM event. This is typically used when
3959 * entering from ring-3 (not longjmp returns).
3960 *
3961 * @param pVCpu The cross context virtual CPU structure.
3962 */
3963static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3964{
3965 Assert(TRPMHasTrap(pVCpu));
3966 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3967
3968 uint8_t uVector;
3969 TRPMEVENT enmTrpmEvent;
3970 uint32_t uErrCode;
3971 RTGCUINTPTR GCPtrFaultAddress;
3972 uint8_t cbInstr;
3973 bool fIcebp;
3974
3975 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3976 AssertRC(rc);
3977
3978 uint32_t u32IntInfo;
3979 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
3980 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
3981
3982 rc = TRPMResetTrap(pVCpu);
3983 AssertRC(rc);
3984 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
3985 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
3986
3987 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
3988}
3989
3990
3991/**
3992 * Converts the pending HM event into a TRPM trap.
3993 *
3994 * @param pVCpu The cross context virtual CPU structure.
3995 */
3996static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
3997{
3998 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3999
4000 /* If a trap was already pending, we did something wrong! */
4001 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4002
4003 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4004 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4005 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4006
4007 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4008
4009 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4010 AssertRC(rc);
4011
4012 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4013 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4014
4015 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4016 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4017 else
4018 {
4019 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4020 switch (uVectorType)
4021 {
4022 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4023 TRPMSetTrapDueToIcebp(pVCpu);
4024 RT_FALL_THRU();
4025 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4026 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4027 {
4028 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4029 || ( uVector == X86_XCPT_BP /* INT3 */
4030 || uVector == X86_XCPT_OF /* INTO */
4031 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4032 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4033 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4034 break;
4035 }
4036 }
4037 }
4038
4039 /* We're now done converting the pending event. */
4040 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4041}
4042
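/*
 * Sketch of the intended pairing (guarded out of the build): convert an incoming TRPM
 * trap to a pending HM event when (re-)entering from ring-3, and convert a still
 * pending HM event back to a TRPM trap before leaving for ring-3.
 */
#if 0
if (TRPMHasTrap(pVCpu))
    vmxHCTrpmTrapToPendingEvent(pVCpu);
/* ... execute guest code ... */
if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
    vmxHCPendingEventToTrpmTrap(pVCpu);
#endif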
4043
4044/**
4045 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4046 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4047 *
4048 * @param pVCpu The cross context virtual CPU structure.
4049 * @param pVmcsInfo The VMCS info. object.
4050 */
4051static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4052{
4053 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4054 {
4055 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4056 {
4057 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4058 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4059 AssertRC(rc);
4060 }
4061 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4062}
4063
4064
4065/**
4066 * Clears the interrupt-window exiting control in the VMCS.
4067 *
4068 * @param pVCpu The cross context virtual CPU structure.
4069 * @param pVmcsInfo The VMCS info. object.
4070 */
4071DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4072{
4073 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4074 {
4075 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4076 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4077 AssertRC(rc);
4078 }
4079}
4080
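/*
 * Sketch of the intended pairing (guarded out of the build; fCanInjectNow is a
 * hypothetical condition for illustration): request an interrupt-window VM-exit while
 * the guest cannot yet take a pending interrupt, and clear it once delivery is possible.
 */
#if 0
if (!fCanInjectNow)
    vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
else
    vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
#endif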
4081
4082/**
4083 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4084 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4085 *
4086 * @param pVCpu The cross context virtual CPU structure.
4087 * @param pVmcsInfo The VMCS info. object.
4088 */
4089static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4090{
4091 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4092 {
4093 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4094 {
4095 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4096 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4097 AssertRC(rc);
4098 Log4Func(("Setup NMI-window exiting\n"));
4099 }
4100 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4101}
4102
4103
4104/**
4105 * Clears the NMI-window exiting control in the VMCS.
4106 *
4107 * @param pVCpu The cross context virtual CPU structure.
4108 * @param pVmcsInfo The VMCS info. object.
4109 */
4110DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4111{
4112 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4113 {
4114 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4115 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4116 AssertRC(rc);
4117 }
4118}
4119
4120
4121/**
4122 * Injects an event into the guest upon VM-entry by updating the relevant fields
4123 * in the VM-entry area in the VMCS.
4124 *
4125 * @returns Strict VBox status code (i.e. informational status codes too).
4126 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4127 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4128 *
4129 * @param pVCpu The cross context virtual CPU structure.
4130 * @param pVmcsInfo The VMCS info object.
4131 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4132 * @param pEvent The event being injected.
4133 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4134 * will be updated if necessary. This cannot be NULL.
4135 * @param fStepping Whether we're single-stepping guest execution and should
4136 * return VINF_EM_DBG_STEPPED if the event is injected
4137 * directly (registers modified by us, not by hardware on
4138 * VM-entry).
4139 */
4140static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4141 bool fStepping, uint32_t *pfIntrState)
4142{
4143 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4144 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4145 Assert(pfIntrState);
4146
4147#ifdef IN_NEM_DARWIN
4148 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4149#endif
4150
4151 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4152 uint32_t u32IntInfo = pEvent->u64IntInfo;
4153 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4154 uint32_t const cbInstr = pEvent->cbInstr;
4155 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4156 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4157 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4158
4159#ifdef VBOX_STRICT
4160 /*
4161 * Validate the error-code-valid bit for hardware exceptions.
4162 * No error codes for exceptions in real-mode.
4163 *
4164 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4165 */
4166 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4167 && !CPUMIsGuestInRealModeEx(pCtx))
4168 {
4169 switch (uVector)
4170 {
4171 case X86_XCPT_PF:
4172 case X86_XCPT_DF:
4173 case X86_XCPT_TS:
4174 case X86_XCPT_NP:
4175 case X86_XCPT_SS:
4176 case X86_XCPT_GP:
4177 case X86_XCPT_AC:
4178 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4179 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4180 RT_FALL_THRU();
4181 default:
4182 break;
4183 }
4184 }
4185
4186 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4187 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4188 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4189#endif
4190
4191 RT_NOREF(uVector);
4192 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4193 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4194 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4195 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4196 {
4197 Assert(uVector <= X86_XCPT_LAST);
4198 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4199 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4200 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4201 }
4202 else
4203 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4204
4205 /*
4206 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4207 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4208 * interrupt handler in the (real-mode) guest.
4209 *
4210 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4211 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4212 */
4213 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4214 {
4215#ifndef IN_NEM_DARWIN
4216 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4217#endif
4218 {
4219 /*
4220 * For CPUs with unrestricted guest execution enabled and with the guest
4221 * in real-mode, we must not set the deliver-error-code bit.
4222 *
4223 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4224 */
4225 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4226 }
4227#ifndef IN_NEM_DARWIN
4228 else
4229 {
4230 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4231 Assert(PDMVmmDevHeapIsEnabled(pVM));
4232 Assert(pVM->hm.s.vmx.pRealModeTSS);
4233 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4234
4235 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4236 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4237 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4238 AssertRCReturn(rc2, rc2);
4239
4240 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4241 size_t const cbIdtEntry = sizeof(X86IDTR16);
4242 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4243 {
4244 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4245 if (uVector == X86_XCPT_DF)
4246 return VINF_EM_RESET;
4247
4248 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4249 No error codes for exceptions in real-mode. */
4250 if (uVector == X86_XCPT_GP)
4251 {
4252 static HMEVENT const s_EventXcptDf
4253 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4254 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4255 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4256 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4257 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4258 }
4259
4260 /*
4261 * If we're injecting an event with no valid IDT entry, inject a #GP.
4262 * No error codes for exceptions in real-mode.
4263 *
4264 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4265 */
4266 static HMEVENT const s_EventXcptGp
4267 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4268 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4269 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4270 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4271 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4272 }
4273
4274 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4275 uint16_t uGuestIp = pCtx->ip;
4276 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4277 {
4278 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4279 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4280 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4281 }
4282 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4283 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4284
4285 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4286 X86IDTR16 IdtEntry;
4287 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4288 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4289 AssertRCReturn(rc2, rc2);
4290
4291 /* Construct the stack frame for the interrupt/exception handler. */
4292 VBOXSTRICTRC rcStrict;
4293 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4294 if (rcStrict == VINF_SUCCESS)
4295 {
4296 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4297 if (rcStrict == VINF_SUCCESS)
4298 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4299 }
4300
4301 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4302 if (rcStrict == VINF_SUCCESS)
4303 {
4304 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4305 pCtx->rip = IdtEntry.offSel;
4306 pCtx->cs.Sel = IdtEntry.uSel;
4307 pCtx->cs.ValidSel = IdtEntry.uSel;
4308 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4309 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4310 && uVector == X86_XCPT_PF)
4311 pCtx->cr2 = GCPtrFault;
4312
4313 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4314 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4315 | HM_CHANGED_GUEST_RSP);
4316
4317 /*
4318 * If we delivered a hardware exception (other than an NMI) and if there was
4319 * block-by-STI in effect, we should clear it.
4320 */
4321 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4322 {
4323 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4324 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4325 Log4Func(("Clearing inhibition due to STI\n"));
4326 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4327 }
4328
4329 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4330 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4331
4332 /*
4333 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4334 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4335 */
4336 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4337
4338 /*
4339 * If we eventually support nested-guest execution without unrestricted guest execution,
4340 * we should set fInterceptEvents here.
4341 */
4342 Assert(!fIsNestedGuest);
4343
4344 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4345 if (fStepping)
4346 rcStrict = VINF_EM_DBG_STEPPED;
4347 }
4348 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4349 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4350 return rcStrict;
4351 }
4352#else
4353 RT_NOREF(pVmcsInfo);
4354#endif
4355 }
4356
4357 /*
4358 * Validate.
4359 */
4360 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4361 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4362
4363 /*
4364 * Inject the event into the VMCS.
4365 */
4366 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4367 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4368 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4369 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4370 AssertRC(rc);
4371
4372 /*
4373 * Update guest CR2 if this is a page-fault.
4374 */
4375 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4376 pCtx->cr2 = GCPtrFault;
4377
4378 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4379 return VINF_SUCCESS;
4380}
4381
4382
4383/**
4384 * Evaluates the event to be delivered to the guest and sets it as the pending
4385 * event.
4386 *
4387 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4388 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4389 * NOT restore these force-flags.
4390 *
4391 * @returns Strict VBox status code (i.e. informational status codes too).
4392 * @param pVCpu The cross context virtual CPU structure.
4393 * @param pVmcsInfo The VMCS information structure.
4394 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4395 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4396 */
4397static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4398{
4399 Assert(pfIntrState);
4400 Assert(!TRPMHasTrap(pVCpu));
4401
4402 /*
4403 * Compute/update guest-interruptibility state related FFs.
4404 * The FFs will be used below while evaluating events to be injected.
4405 */
4406 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4407
4408 /*
4409 * Evaluate if a new event needs to be injected.
4410 * An event that's already pending has already performed all necessary checks.
4411 */
4412 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4413 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4414 {
4415 /** @todo SMI. SMIs take priority over NMIs. */
4416
4417 /*
4418 * NMIs.
4419 * NMIs take priority over external interrupts.
4420 */
4421#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4422 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4423#endif
4424 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4425 {
4426 /*
4427 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4428 *
4429 * For a nested-guest, the FF always indicates the outer guest's ability to
4430 * receive an NMI while the guest-interruptibility state bit depends on whether
4431 * the nested-hypervisor is using virtual-NMIs.
4432 */
4433 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4434 {
4435#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4436 if ( fIsNestedGuest
4437 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4438 return IEMExecVmxVmexitXcptNmi(pVCpu);
4439#endif
4440 vmxHCSetPendingXcptNmi(pVCpu);
4441 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4442 Log4Func(("NMI pending injection\n"));
4443
4444 /* We've injected the NMI, bail. */
4445 return VINF_SUCCESS;
4446 }
4447 else if (!fIsNestedGuest)
4448 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4449 }
4450
4451 /*
4452 * External interrupts (PIC/APIC).
4453 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4454 * We cannot re-request the interrupt from the controller again.
4455 */
4456 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4457 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4458 {
4459 Assert(!DBGFIsStepping(pVCpu));
4460 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4461 AssertRC(rc);
4462
4463 /*
4464 * We must not check EFLAGS directly when executing a nested-guest, use
4465 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4466 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4467 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4468 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4469 *
4470 * See Intel spec. 25.4.1 "Event Blocking".
4471 */
4472 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4473 {
4474#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4475 if ( fIsNestedGuest
4476 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4477 {
4478 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4479 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4480 return rcStrict;
4481 }
4482#endif
4483 uint8_t u8Interrupt;
4484 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4485 if (RT_SUCCESS(rc))
4486 {
4487#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4488 if ( fIsNestedGuest
4489 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4490 {
4491 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4492 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4493 return rcStrict;
4494 }
4495#endif
4496 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4497 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4498 }
4499 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4500 {
4501 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4502
4503 if ( !fIsNestedGuest
4504 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4505 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4506 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4507
4508 /*
4509 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4510 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4511 * need to re-set this force-flag here.
4512 */
4513 }
4514 else
4515 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4516
4517 /* We've injected the interrupt or taken necessary action, bail. */
4518 return VINF_SUCCESS;
4519 }
4520 if (!fIsNestedGuest)
4521 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4522 }
4523 }
4524 else if (!fIsNestedGuest)
4525 {
4526 /*
4527 * An event is being injected or we are in an interrupt shadow. Check if another event is
4528 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4529 * the pending event.
4530 */
4531 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4532 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4533 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4534 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4535 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4536 }
4537 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4538
4539 return VINF_SUCCESS;
4540}
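/*
 * For reference, a minimal sketch (illustration only) of decoding the guest-
 * interruptibility word returned through pfIntrState above, using the bit names
 * the function itself relies on:
 *
 *     bool const fBlockSti    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
 *     bool const fBlockMovSS  = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
 *     bool const fBlockNmi    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
 *     bool const fInIntShadow = fBlockSti || fBlockMovSS;
 *
 * An external interrupt can only be injected when fInIntShadow is false and the
 * interrupt-enable condition checked above (EFLAGS.IF or its nested-guest
 * equivalent) permits it.
 */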
4541
4542
4543/**
4544 * Injects any pending events into the guest if the guest is in a state to
4545 * receive them.
4546 *
4547 * @returns Strict VBox status code (i.e. informational status codes too).
4548 * @param pVCpu The cross context virtual CPU structure.
4549 * @param pVmcsInfo The VMCS information structure.
4550 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4551 * @param fIntrState The VT-x guest-interruptibility state.
4552 * @param fStepping Whether we are single-stepping the guest using the
4553 * hypervisor debugger and should return
4554 * VINF_EM_DBG_STEPPED if the event was dispatched
4555 * directly.
4556 */
4557static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4558 uint32_t fIntrState, bool fStepping)
4559{
4560 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4561#ifndef IN_NEM_DARWIN
4562 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4563#endif
4564
4565#ifdef VBOX_STRICT
4566 /*
4567 * Verify guest-interruptibility state.
4568 *
4569 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4570 * since injecting an event may modify the interruptibility state and we must thus always
4571 * use fIntrState.
4572 */
4573 {
4574 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4575 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4576 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4577 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4578 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4579 Assert(!TRPMHasTrap(pVCpu));
4580 NOREF(fBlockMovSS); NOREF(fBlockSti);
4581 }
4582#endif
4583
4584 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4585 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4586 {
4587 /*
4588 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4589 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4590 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4591 *
4592 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4593 */
4594 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4595#ifdef VBOX_STRICT
4596 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4597 {
4598 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4599 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4600 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4601 }
4602 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4603 {
4604 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4605 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4606 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4607 }
4608#endif
4609 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4610 uIntType));
4611
4612 /*
4613 * Inject the event and get any changes to the guest-interruptibility state.
4614 *
4615 * The guest-interruptibility state may need to be updated if we inject the event
4616 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4617 */
4618 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4619 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4620
4621 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4622 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4623 else
4624 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4625 }
4626
4627 /*
4628 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4629     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4630 */
4631 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4632 && !fIsNestedGuest)
4633 {
4634 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4635
4636 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4637 {
4638 /*
4639 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4640 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4641 */
4642 Assert(!DBGFIsStepping(pVCpu));
4643 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4644 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4645 AssertRC(rc);
4646 }
4647 else
4648 {
4649 /*
4650 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4651 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4652 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4653 * we use MTF, so just make sure it's called before executing guest-code.
4654 */
4655 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4656 }
4657 }
4658     /* else: for nested-guests this is currently handled while merging VMCS controls. */
4659
4660 /*
4661 * Finally, update the guest-interruptibility state.
4662 *
4663 * This is required for the real-on-v86 software interrupt injection, for
4664 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4665 */
4666 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4667 AssertRC(rc);
4668
4669 /*
4670 * There's no need to clear the VM-entry interruption-information field here if we're not
4671 * injecting anything. VT-x clears the valid bit on every VM-exit.
4672 *
4673 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4674 */
4675
4676 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4677 return rcStrict;
4678}
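/*
 * A condensed sketch (illustration only, not the exact call sites) of how the
 * pre-run code is expected to pair event evaluation and injection; the real
 * callers live in the run-loop preparation code and handle more return codes:
 *
 *     uint32_t     fIntrState = 0;
 *     VBOXSTRICTRC rcStrict   = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, fIntrState, fStepping);
 */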
4679
4680
4681/**
4682 * Tries to determine what part of the guest-state VT-x has deemed invalid
4683 * and updates the error record fields accordingly.
4684 *
4685 * @returns VMX_IGS_* error codes.
4686 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4687 * wrong with the guest state.
4688 *
4689 * @param pVCpu The cross context virtual CPU structure.
4690 * @param pVmcsInfo The VMCS info. object.
4691 *
4692 * @remarks This function assumes our cache of the VMCS controls
4693 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4694 */
4695static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4696{
4697#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4698#define HMVMX_CHECK_BREAK(expr, err) do { \
4699 if (!(expr)) { uError = (err); break; } \
4700 } while (0)
4701
4702 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4703 uint32_t uError = VMX_IGS_ERROR;
4704 uint32_t u32IntrState = 0;
4705#ifndef IN_NEM_DARWIN
4706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4707 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4708#else
4709 bool const fUnrestrictedGuest = true;
4710#endif
4711 do
4712 {
4713 int rc;
4714
4715 /*
4716 * Guest-interruptibility state.
4717 *
4718         * Read this first so that any check that fails prior to those that actually
4719         * require the guest-interruptibility state still reflects the correct
4720         * VMCS value and avoids causing further confusion.
4721 */
4722 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4723 AssertRC(rc);
4724
4725 uint32_t u32Val;
4726 uint64_t u64Val;
4727
4728 /*
4729 * CR0.
4730 */
4731 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4732 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4733 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4734 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4735 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4736 if (fUnrestrictedGuest)
4737 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4738
4739 uint64_t u64GuestCr0;
4740 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4741 AssertRC(rc);
4742 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4743 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4744 if ( !fUnrestrictedGuest
4745 && (u64GuestCr0 & X86_CR0_PG)
4746 && !(u64GuestCr0 & X86_CR0_PE))
4747 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4748
4749 /*
4750 * CR4.
4751 */
4752 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4753 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4754 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4755
4756 uint64_t u64GuestCr4;
4757 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4758 AssertRC(rc);
4759 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4760 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4761
4762 /*
4763 * IA32_DEBUGCTL MSR.
4764 */
4765 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4766 AssertRC(rc);
4767 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4768 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4769 {
4770 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4771 }
4772 uint64_t u64DebugCtlMsr = u64Val;
4773
4774#ifdef VBOX_STRICT
4775 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4776 AssertRC(rc);
4777 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4778#endif
4779 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4780
4781 /*
4782 * RIP and RFLAGS.
4783 */
4784 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4785 AssertRC(rc);
4786 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4787 if ( !fLongModeGuest
4788 || !pCtx->cs.Attr.n.u1Long)
4789 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4790 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4791 * must be identical if the "IA-32e mode guest" VM-entry
4792 * control is 1 and CS.L is 1. No check applies if the
4793 * CPU supports 64 linear-address bits. */
4794
4795 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4796 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4797 AssertRC(rc);
4798         HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
4799 VMX_IGS_RFLAGS_RESERVED);
4800 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4801 uint32_t const u32Eflags = u64Val;
4802
4803 if ( fLongModeGuest
4804 || ( fUnrestrictedGuest
4805 && !(u64GuestCr0 & X86_CR0_PE)))
4806 {
4807 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4808 }
4809
4810 uint32_t u32EntryInfo;
4811 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4812 AssertRC(rc);
4813 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4814 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4815
4816 /*
4817 * 64-bit checks.
4818 */
4819 if (fLongModeGuest)
4820 {
4821 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4822 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4823 }
4824
4825 if ( !fLongModeGuest
4826 && (u64GuestCr4 & X86_CR4_PCIDE))
4827 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4828
4829 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4830 * 51:32 beyond the processor's physical-address width are 0. */
4831
4832 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4833 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4834 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4835
4836#ifndef IN_NEM_DARWIN
4837 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4838 AssertRC(rc);
4839 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4840
4841 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4842 AssertRC(rc);
4843 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4844#endif
4845
4846 /*
4847 * PERF_GLOBAL MSR.
4848 */
4849 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4850 {
4851 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4852 AssertRC(rc);
4853 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4854 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4855 }
4856
4857 /*
4858 * PAT MSR.
4859 */
4860 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4861 {
4862 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4863 AssertRC(rc);
4864 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4865 for (unsigned i = 0; i < 8; i++)
4866 {
4867 uint8_t u8Val = (u64Val & 0xff);
4868 if ( u8Val != 0 /* UC */
4869 && u8Val != 1 /* WC */
4870 && u8Val != 4 /* WT */
4871 && u8Val != 5 /* WP */
4872 && u8Val != 6 /* WB */
4873 && u8Val != 7 /* UC- */)
4874 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4875 u64Val >>= 8;
4876 }
4877 }
4878
4879 /*
4880 * EFER MSR.
4881 */
4882 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4883 {
4884 Assert(g_fHmVmxSupportsVmcsEfer);
4885 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4886 AssertRC(rc);
4887 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4888 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4889 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4890 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4891 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4892 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4893 * iemVmxVmentryCheckGuestState(). */
4894 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4895 || !(u64GuestCr0 & X86_CR0_PG)
4896 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4897 VMX_IGS_EFER_LMA_LME_MISMATCH);
4898 }
4899
4900 /*
4901 * Segment registers.
4902 */
4903 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4904 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4905 if (!(u32Eflags & X86_EFL_VM))
4906 {
4907 /* CS */
4908 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4909 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4910 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4911 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4912 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4913 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4914 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4915 /* CS cannot be loaded with NULL in protected mode. */
4916 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4917 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4918 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4919 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4920 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4921 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4922 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4923 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4924 else
4925 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4926
4927 /* SS */
4928 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4929 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4930 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4931 if ( !(pCtx->cr0 & X86_CR0_PE)
4932 || pCtx->cs.Attr.n.u4Type == 3)
4933 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4934
4935 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4936 {
4937 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4938 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4939 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4940 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4941 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4942 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4943 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4944 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4945 }
4946
4947 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4948 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4949 {
4950 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4951 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4952 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4953 || pCtx->ds.Attr.n.u4Type > 11
4954 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4955 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4956 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4957 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4958 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4959 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4960 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4961 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4962 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4963 }
4964 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4965 {
4966 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4967 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4968 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4969 || pCtx->es.Attr.n.u4Type > 11
4970 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4971 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4972 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4973 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4974 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4975 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4976 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4977 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4978 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
4979 }
4980 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4981 {
4982 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
4983 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
4984 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4985 || pCtx->fs.Attr.n.u4Type > 11
4986 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
4987 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
4988 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
4989 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4990 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
4991 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
4992 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
4993 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4994 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
4995 }
4996 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4997 {
4998 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
4999 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5000 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5001 || pCtx->gs.Attr.n.u4Type > 11
5002 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5003 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5004 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5005 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5006 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5007 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5008 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5009 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5010 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5011 }
5012 /* 64-bit capable CPUs. */
5013 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5014 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5015 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5016 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5017 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5018 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5019 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5020 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5021 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5022 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5023 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5024 }
5025 else
5026 {
5027 /* V86 mode checks. */
5028 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5029 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5030 {
5031 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5032 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5033 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5034 }
5035 else
5036 {
5037 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5038 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5039 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5040 }
5041
5042 /* CS */
5043 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5044 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5045 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5046 /* SS */
5047 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5048 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5049 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5050 /* DS */
5051 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5052 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5053 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5054 /* ES */
5055 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5056 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5057 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5058 /* FS */
5059 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5060 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5061 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5062 /* GS */
5063 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5064 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5065 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5066 /* 64-bit capable CPUs. */
5067 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5068 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5069 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5070 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5071 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5072 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5073 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5074 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5075 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5076 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5077 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5078 }
5079
5080 /*
5081 * TR.
5082 */
5083 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5084 /* 64-bit capable CPUs. */
5085 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5086 if (fLongModeGuest)
5087 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5088 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5089 else
5090 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5091 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5092 VMX_IGS_TR_ATTR_TYPE_INVALID);
5093 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5094 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5095 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5096 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5097 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5098 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5099 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5100 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5101
5102 /*
5103 * GDTR and IDTR (64-bit capable checks).
5104 */
5105 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5106 AssertRC(rc);
5107 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5108
5109 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5110 AssertRC(rc);
5111 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5112
5113 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5114 AssertRC(rc);
5115 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5116
5117 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5118 AssertRC(rc);
5119 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5120
5121 /*
5122 * Guest Non-Register State.
5123 */
5124 /* Activity State. */
5125 uint32_t u32ActivityState;
5126 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5127 AssertRC(rc);
5128 HMVMX_CHECK_BREAK( !u32ActivityState
5129 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5130 VMX_IGS_ACTIVITY_STATE_INVALID);
5131 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5132 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5133
5134 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5135 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5136 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5137
5138 /** @todo Activity state and injecting interrupts. Left as a todo since we
5139 * currently don't use activity states but ACTIVE. */
5140
5141 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5142 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5143
5144 /* Guest interruptibility-state. */
5145 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5146 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5147 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5148 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5149 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5150 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5151 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5152 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5153 {
5154 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5155 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5156 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5157 }
5158 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5159 {
5160 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5161 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5162 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5163 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5164 }
5165 /** @todo Assumes the processor is not in SMM. */
5166 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5167 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5168 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5169 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5170 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5171 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5172 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5173 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5174
5175 /* Pending debug exceptions. */
5176 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5177 AssertRC(rc);
5178 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5179 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5180 u32Val = u64Val; /* For pending debug exceptions checks below. */
5181
5182 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5183 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5184 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5185 {
5186 if ( (u32Eflags & X86_EFL_TF)
5187 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5188 {
5189 /* Bit 14 is PendingDebug.BS. */
5190 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5191 }
5192 if ( !(u32Eflags & X86_EFL_TF)
5193 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5194 {
5195 /* Bit 14 is PendingDebug.BS. */
5196 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5197 }
5198 }
5199
5200#ifndef IN_NEM_DARWIN
5201 /* VMCS link pointer. */
5202 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5203 AssertRC(rc);
5204 if (u64Val != UINT64_C(0xffffffffffffffff))
5205 {
5206 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5207 /** @todo Bits beyond the processor's physical-address width MBZ. */
5208 /** @todo SMM checks. */
5209 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5210 Assert(pVmcsInfo->pvShadowVmcs);
5211 VMXVMCSREVID VmcsRevId;
5212 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5213 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5214 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5215 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5216 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5217 }
5218
5219 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5220 * not using nested paging? */
5221 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5222 && !fLongModeGuest
5223 && CPUMIsGuestInPAEModeEx(pCtx))
5224 {
5225 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5226 AssertRC(rc);
5227 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5228
5229 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5230 AssertRC(rc);
5231 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5232
5233 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5234 AssertRC(rc);
5235 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5236
5237 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5238 AssertRC(rc);
5239 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5240 }
5241#endif
5242
5243 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5244 if (uError == VMX_IGS_ERROR)
5245 uError = VMX_IGS_REASON_NOT_FOUND;
5246 } while (0);
5247
5248 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5249 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5250 return uError;
5251
5252#undef HMVMX_ERROR_BREAK
5253#undef HMVMX_CHECK_BREAK
5254}
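/*
 * A sketch (illustration only; the actual VMX_EXIT_ERR_INVALID_GUEST_STATE handler
 * performs considerably more diagnostics) of how the checker above is consumed:
 *
 *     uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
 *     if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
 *         Log4Func(("Invalid guest state, diagnostic %#x\n", uInvalidReason));
 */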
5255
5256
5257#ifndef HMVMX_USE_FUNCTION_TABLE
5258/**
5259 * Handles a guest VM-exit from hardware-assisted VMX execution.
5260 *
5261 * @returns Strict VBox status code (i.e. informational status codes too).
5262 * @param pVCpu The cross context virtual CPU structure.
5263 * @param pVmxTransient The VMX-transient structure.
5264 */
5265DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5266{
5267#ifdef DEBUG_ramshankar
5268# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5269 do { \
5270 if (a_fSave != 0) \
5271 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5272 VBOXSTRICTRC rcStrict = a_CallExpr; \
5273 if (a_fSave != 0) \
5274 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5275 return rcStrict; \
5276 } while (0)
5277#else
5278# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5279#endif
5280 uint32_t const uExitReason = pVmxTransient->uExitReason;
5281 switch (uExitReason)
5282 {
5283 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5284 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5285 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5286 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5287 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5288 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5289 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5290 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5291 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5292 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5293 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5294 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5295 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5296 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5297 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5298 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5299 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5300 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5301 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5302 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5303 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5304 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5305 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5306 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5307 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5308 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5309 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5310 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5311 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5312 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5313#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5314 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5315 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5316 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5317 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5318 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5319         case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5320         case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5321 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5322 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5323 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5324#else
5325 case VMX_EXIT_VMCLEAR:
5326 case VMX_EXIT_VMLAUNCH:
5327 case VMX_EXIT_VMPTRLD:
5328 case VMX_EXIT_VMPTRST:
5329 case VMX_EXIT_VMREAD:
5330 case VMX_EXIT_VMRESUME:
5331 case VMX_EXIT_VMWRITE:
5332 case VMX_EXIT_VMXOFF:
5333 case VMX_EXIT_VMXON:
5334 case VMX_EXIT_INVVPID:
5335 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5336#endif
5337#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5338 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5339#else
5340 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5341#endif
5342
5343 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5344 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5345 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5346
5347 case VMX_EXIT_INIT_SIGNAL:
5348 case VMX_EXIT_SIPI:
5349 case VMX_EXIT_IO_SMI:
5350 case VMX_EXIT_SMI:
5351 case VMX_EXIT_ERR_MSR_LOAD:
5352 case VMX_EXIT_ERR_MACHINE_CHECK:
5353 case VMX_EXIT_PML_FULL:
5354 case VMX_EXIT_VIRTUALIZED_EOI:
5355 case VMX_EXIT_GDTR_IDTR_ACCESS:
5356 case VMX_EXIT_LDTR_TR_ACCESS:
5357 case VMX_EXIT_APIC_WRITE:
5358 case VMX_EXIT_RDRAND:
5359 case VMX_EXIT_RSM:
5360 case VMX_EXIT_VMFUNC:
5361 case VMX_EXIT_ENCLS:
5362 case VMX_EXIT_RDSEED:
5363 case VMX_EXIT_XSAVES:
5364 case VMX_EXIT_XRSTORS:
5365 case VMX_EXIT_UMWAIT:
5366 case VMX_EXIT_TPAUSE:
5367 case VMX_EXIT_LOADIWKEY:
5368 default:
5369 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5370 }
5371#undef VMEXIT_CALL_RET
5372}
5373#endif /* !HMVMX_USE_FUNCTION_TABLE */
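/*
 * When HMVMX_USE_FUNCTION_TABLE is defined, the switch above is replaced by a
 * table of exit handlers indexed by the exit reason.  Conceptually, and using
 * hypothetical names purely for illustration (the real table and its entry type
 * are defined elsewhere):
 *
 *     typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
 *     extern FNVMXEXITHANDLER *const g_apfnVMExitHandlers[VMX_EXIT_MAX + 1];
 *
 *     VBOXSTRICTRC rcStrict = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pVmxTransient);
 */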
5374
5375
5376#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5377/**
5378 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5379 *
5380 * @returns Strict VBox status code (i.e. informational status codes too).
5381 * @param pVCpu The cross context virtual CPU structure.
5382 * @param pVmxTransient The VMX-transient structure.
5383 */
5384DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5385{
5386 uint32_t const uExitReason = pVmxTransient->uExitReason;
5387 switch (uExitReason)
5388 {
5389# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5390 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5391 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5392# else
5393 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5394 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5395# endif
5396 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5397 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5398 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5399
5400 /*
5401 * We shouldn't direct host physical interrupts to the nested-guest.
5402 */
5403 case VMX_EXIT_EXT_INT:
5404 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5405
5406 /*
5407         * Instructions that cause VM-exits unconditionally or whose VM-exit condition
5408         * is taken solely from the nested hypervisor (meaning that if the VM-exit
5409         * happens, it is guaranteed to be a nested-guest VM-exit).
5410 *
5411 * - Provides VM-exit instruction length ONLY.
5412 */
5413 case VMX_EXIT_CPUID: /* Unconditional. */
5414 case VMX_EXIT_VMCALL:
5415 case VMX_EXIT_GETSEC:
5416 case VMX_EXIT_INVD:
5417 case VMX_EXIT_XSETBV:
5418 case VMX_EXIT_VMLAUNCH:
5419 case VMX_EXIT_VMRESUME:
5420 case VMX_EXIT_VMXOFF:
5421 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5422 case VMX_EXIT_VMFUNC:
5423 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5424
5425 /*
5426         * Instructions that cause VM-exits unconditionally or whose VM-exit condition
5427         * is taken solely from the nested hypervisor (meaning that if the VM-exit
5428         * happens, it is guaranteed to be a nested-guest VM-exit).
5429 *
5430 * - Provides VM-exit instruction length.
5431 * - Provides VM-exit information.
5432 * - Optionally provides Exit qualification.
5433 *
5434 * Since Exit qualification is 0 for all VM-exits where it is not
5435 * applicable, reading and passing it to the guest should produce
5436 * defined behavior.
5437 *
5438 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5439 */
5440 case VMX_EXIT_INVEPT: /* Unconditional. */
5441 case VMX_EXIT_INVVPID:
5442 case VMX_EXIT_VMCLEAR:
5443 case VMX_EXIT_VMPTRLD:
5444 case VMX_EXIT_VMPTRST:
5445 case VMX_EXIT_VMXON:
5446 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5447 case VMX_EXIT_LDTR_TR_ACCESS:
5448 case VMX_EXIT_RDRAND:
5449 case VMX_EXIT_RDSEED:
5450 case VMX_EXIT_XSAVES:
5451 case VMX_EXIT_XRSTORS:
5452 case VMX_EXIT_UMWAIT:
5453 case VMX_EXIT_TPAUSE:
5454 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5455
5456 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5457 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5458 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5459 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5460 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5461 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5462 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5463 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5464 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5465 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5466 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5467 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5468 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5469 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5470 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5471 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5472 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5473 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5474 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5475
5476 case VMX_EXIT_PREEMPT_TIMER:
5477 {
5478 /** @todo NSTVMX: Preempt timer. */
5479 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5480 }
5481
5482 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5484
5485 case VMX_EXIT_VMREAD:
5486 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5487
5488 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5490
5491 case VMX_EXIT_INIT_SIGNAL:
5492 case VMX_EXIT_SIPI:
5493 case VMX_EXIT_IO_SMI:
5494 case VMX_EXIT_SMI:
5495 case VMX_EXIT_ERR_MSR_LOAD:
5496 case VMX_EXIT_ERR_MACHINE_CHECK:
5497 case VMX_EXIT_PML_FULL:
5498 case VMX_EXIT_RSM:
5499 default:
5500 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5501 }
5502}
5503#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
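/*
 * A sketch (illustration only) of how a run loop is expected to select between
 * the two dispatchers above, based on whether the VM-exit happened while the
 * nested-guest VMCS was active:
 *
 *     VBOXSTRICTRC rcStrict = !pVmxTransient->fIsNestedGuest
 *                           ? vmxHCHandleExit(pVCpu, pVmxTransient)
 *                           : vmxHCHandleExitNested(pVCpu, pVmxTransient);
 */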
5504
5505
5506/** @name VM-exit helpers.
5507 * @{
5508 */
5509/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5510/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5511/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5512
5513/** Macro for VM-exits called unexpectedly. */
5514#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5515 do { \
5516 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5517 return VERR_VMX_UNEXPECTED_EXIT; \
5518 } while (0)
5519
5520#ifdef VBOX_STRICT
5521# ifndef IN_NEM_DARWIN
5522 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5523# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5524 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5525
5526# define HMVMX_ASSERT_PREEMPT_CPUID() \
5527 do { \
5528 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5529 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5530 } while (0)
5531
5532# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5533 do { \
5534 AssertPtr((a_pVCpu)); \
5535 AssertPtr((a_pVmxTransient)); \
5536 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5537 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5538 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5539 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5540 Assert((a_pVmxTransient)->pVmcsInfo); \
5541 Assert(ASMIntAreEnabled()); \
5542 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5543 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5544 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5545 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5546 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5547 HMVMX_ASSERT_PREEMPT_CPUID(); \
5548 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5549 } while (0)
5550# else
5551# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5552# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5553# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5554 do { \
5555 AssertPtr((a_pVCpu)); \
5556 AssertPtr((a_pVmxTransient)); \
5557 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
5558 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
5559 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
5560 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
5561 Assert((a_pVmxTransient)->pVmcsInfo); \
5562 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5563 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5564 } while (0)
5565# endif
5566
5567# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5568 do { \
5569 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5570 Assert((a_pVmxTransient)->fIsNestedGuest); \
5571 } while (0)
5572
5573# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5574 do { \
5575 Log4Func(("\n")); \
5576 } while (0)
5577#else
5578# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5579 do { \
5580 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5581 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5582 } while (0)
5583
5584# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5585 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5586
5587# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5588#endif
5589
5590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5591 /** Macro that does the necessary privilege checks and raises any intercepted
5592 * VM-exits for guests that attempted to execute a VMX instruction. */
5593# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5594 do \
5595 { \
5596 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5597 if (rcStrictTmp == VINF_SUCCESS) \
5598 { /* likely */ } \
5599 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5600 { \
5601 Assert((a_pVCpu)->hm.s.Event.fPending); \
5602 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5603 return VINF_SUCCESS; \
5604 } \
5605 else \
5606 { \
5607 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5608 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5609 } \
5610 } while (0)
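/*
 * Roughly how a VMX-instruction exit handler is expected to open with the macro
 * above (a sketch for illustration, not a verbatim copy of any specific handler):
 *
 *     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     ... decode operands, hand the instruction to IEM, handle the strict status code ...
 */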
5611
5612 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5613# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5614 do \
5615 { \
5616 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5617 (a_pGCPtrEffAddr)); \
5618 if (rcStrictTmp == VINF_SUCCESS) \
5619 { /* likely */ } \
5620 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5621 { \
5622 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5623 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5624 NOREF(uXcptTmp); \
5625 return VINF_SUCCESS; \
5626 } \
5627 else \
5628 { \
5629 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5630 return rcStrictTmp; \
5631 } \
5632 } while (0)
5633#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5634
5635
5636/**
5637 * Advances the guest RIP by the specified number of bytes.
5638 *
5639 * @param pVCpu The cross context virtual CPU structure.
5640 * @param cbInstr Number of bytes to advance the RIP by.
5641 *
5642 * @remarks No-long-jump zone!!!
5643 */
5644DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5645{
5646 /* Advance the RIP. */
5647 pVCpu->cpum.GstCtx.rip += cbInstr;
5648 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5649
5650 /* Update interrupt inhibition. */
5651 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5652 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5653 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5654}
5655
5656
5657/**
5658 * Advances the guest RIP after reading it from the VMCS.
5659 *
5660 * @returns VBox status code, no informational status codes.
5661 * @param pVCpu The cross context virtual CPU structure.
5662 * @param pVmxTransient The VMX-transient structure.
5663 *
5664 * @remarks No-long-jump zone!!!
5665 */
5666static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5667{
5668 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
5669 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5670 AssertRCReturn(rc, rc);
5671
5672 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5673 return VINF_SUCCESS;
5674}
5675
5676
5677/**
5678 * Handle a condition that occurred while delivering an event through the guest or
5679 * nested-guest IDT.
5680 *
5681 * @returns Strict VBox status code (i.e. informational status codes too).
5682 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5683 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5684 * to continue execution of the guest which will deliver the \#DF.
5685 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5686 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5687 *
5688 * @param pVCpu The cross context virtual CPU structure.
5689 * @param pVmxTransient The VMX-transient structure.
5690 *
5691 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5692 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5693 * is due to an EPT violation, PML full or SPP-related event.
5694 *
5695 * @remarks No-long-jump zone!!!
5696 */
5697static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5698{
5699 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5700 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5701 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5702 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5703 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5704 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5705
5706 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5707 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5708 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5709 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
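/* A valid IDT-vectoring information field means this VM-exit interrupted the delivery of a prior event through the guest IDT. */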
5710 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5711 {
5712 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5713 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5714
5715 /*
5716 * If the event was a software interrupt (generated with INT n), a software exception
5717 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5718 * can handle the VM-exit and continue guest execution, which will re-execute the
5719 * instruction rather than re-inject the exception. Re-injecting can cause premature
5720 * trips to ring-3 before injection and involves TRPM, which currently has no way of
5721 * recording that these exceptions were caused by these instructions (ICEBP's #DB poses
5722 * the problem).
5723 */
5724 IEMXCPTRAISE enmRaise;
5725 IEMXCPTRAISEINFO fRaiseInfo;
5726 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5727 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5728 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5729 {
5730 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5731 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5732 }
5733 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5734 {
5735 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5736 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5737 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5738
5739 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5740 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5741
5742 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5743
5744 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5745 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5746 {
5747 pVmxTransient->fVectoringPF = true;
5748 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5749 }
5750 }
5751 else
5752 {
5753 /*
5754 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5755 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5756 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5757 */
5758 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5759 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5760 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5761 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5762 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5763 }
5764
5765 /*
5766 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5767 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5768 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5769 * subsequent VM-entry would fail, see @bugref{7445}.
5770 *
5771 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5772 */
5773 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5774 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5775 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5776 && CPUMIsGuestNmiBlocking(pVCpu))
5777 {
5778 CPUMSetGuestNmiBlocking(pVCpu, false);
5779 }
5780
5781 switch (enmRaise)
5782 {
5783 case IEMXCPTRAISE_CURRENT_XCPT:
5784 {
5785 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5786 Assert(rcStrict == VINF_SUCCESS);
5787 break;
5788 }
5789
5790 case IEMXCPTRAISE_PREV_EVENT:
5791 {
5792 uint32_t u32ErrCode;
5793 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5794 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5795 else
5796 u32ErrCode = 0;
5797
5798 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5799 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5800 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
5801 pVCpu->cpum.GstCtx.cr2);
5802
5803 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5804 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5805 Assert(rcStrict == VINF_SUCCESS);
5806 break;
5807 }
5808
5809 case IEMXCPTRAISE_REEXEC_INSTR:
5810 Assert(rcStrict == VINF_SUCCESS);
5811 break;
5812
5813 case IEMXCPTRAISE_DOUBLE_FAULT:
5814 {
5815 /*
5816 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5817 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
5818 */
5819 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5820 {
5821 pVmxTransient->fVectoringDoublePF = true;
5822 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5823 pVCpu->cpum.GstCtx.cr2));
5824 rcStrict = VINF_SUCCESS;
5825 }
5826 else
5827 {
5828 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5829 vmxHCSetPendingXcptDF(pVCpu);
5830 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5831 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5832 rcStrict = VINF_HM_DOUBLE_FAULT;
5833 }
5834 break;
5835 }
5836
5837 case IEMXCPTRAISE_TRIPLE_FAULT:
5838 {
5839 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5840 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5841 rcStrict = VINF_EM_RESET;
5842 break;
5843 }
5844
5845 case IEMXCPTRAISE_CPU_HANG:
5846 {
5847 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5848 rcStrict = VERR_EM_GUEST_CPU_HANG;
5849 break;
5850 }
5851
5852 default:
5853 {
5854 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5855 rcStrict = VERR_VMX_IPE_2;
5856 break;
5857 }
5858 }
5859 }
5860 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5861 && !CPUMIsGuestNmiBlocking(pVCpu))
5862 {
5863 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5864 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5865 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5866 {
5867 /*
5868 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5869 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5870 * that virtual NMIs remain blocked until the IRET execution is completed.
5871 *
5872 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5873 */
5874 CPUMSetGuestNmiBlocking(pVCpu, true);
5875 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5876 }
5877 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5878 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5879 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5880 {
5881 /*
5882 * Execution of IRET caused an EPT violation, page-modification log-full event or
5883 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5884 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5885 * that virtual NMIs remain blocked until the IRET execution is completed.
5886 *
5887 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5888 */
5889 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5890 {
5891 CPUMSetGuestNmiBlocking(pVCpu, true);
5892 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5893 }
5894 }
5895 }
5896
5897 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5898 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5899 return rcStrict;
5900}
5901
5902
5903#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5904/**
5905 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5906 * guest attempting to execute a VMX instruction.
5907 *
5908 * @returns Strict VBox status code (i.e. informational status codes too).
5909 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5910 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5911 *
5912 * @param pVCpu The cross context virtual CPU structure.
5913 * @param uExitReason The VM-exit reason.
5914 *
5915 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5916 * @remarks No-long-jump zone!!!
5917 */
5918static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5919{
5920 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5921 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5922
5923 /*
5924 * The physical CPU would have already checked the CPU mode/code segment.
5925 * We shall just assert here for paranoia.
5926 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5927 */
5928 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5929 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5930 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5931
5932 if (uExitReason == VMX_EXIT_VMXON)
5933 {
5934 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5935
5936 /*
5937 * We check CR4.VMXE because it is required to be always set while in VMX operation
5938 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5939 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5940 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5941 */
5942 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5943 {
5944 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5945 vmxHCSetPendingXcptUD(pVCpu);
5946 return VINF_HM_PENDING_XCPT;
5947 }
5948 }
5949 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5950 {
5951 /*
5952 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5953 * (other than VMXON), we need to raise a #UD.
5954 */
5955 Log4Func(("Not in VMX root mode -> #UD\n"));
5956 vmxHCSetPendingXcptUD(pVCpu);
5957 return VINF_HM_PENDING_XCPT;
5958 }
5959
5960 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5961 return VINF_SUCCESS;
5962}
5963
5964
5965/**
5966 * Decodes the memory operand of an instruction that caused a VM-exit.
5967 *
5968 * The Exit qualification field provides the displacement field for memory
5969 * operand instructions, if any.
5970 *
5971 * @returns Strict VBox status code (i.e. informational status codes too).
5972 * @retval VINF_SUCCESS if the operand was successfully decoded.
5973 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5974 * operand.
5975 * @param pVCpu The cross context virtual CPU structure.
5976 * @param uExitInstrInfo The VM-exit instruction information field.
5977 * @param GCPtrDisp The instruction displacement field, if any. For
5978 * RIP-relative addressing pass RIP + displacement here.
5979 * @param enmMemAccess The memory operand's access type (read or write).
5980 * @param pGCPtrMem Where to store the effective destination memory address.
5981 *
5982 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5983 * virtual-8086 mode, hence it skips those checks while verifying that the
5984 * segment is valid.
5985 */
5986static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
5987 PRTGCPTR pGCPtrMem)
5988{
5989 Assert(pGCPtrMem);
5990 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
5991 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
5992 | CPUMCTX_EXTRN_CR0);
5993
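/* The index corresponds to the VM-exit instruction information address-size encoding: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */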
5994 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5995 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
5996 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
5997
5998 VMXEXITINSTRINFO ExitInstrInfo;
5999 ExitInstrInfo.u = uExitInstrInfo;
6000 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6001 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6002 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6003 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6004 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6005 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6006 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6007 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6008 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6009
6010 /*
6011 * Validate instruction information.
6012 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6013 */
6014 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6015 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6016 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6017 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6018 AssertLogRelMsgReturn(fIsMemOperand,
6019 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6020
6021 /*
6022 * Compute the complete effective address.
6023 *
6024 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6025 * See AMD spec. 4.5.2 "Segment Registers".
6026 */
6027 RTGCPTR GCPtrMem = GCPtrDisp;
6028 if (fBaseRegValid)
6029 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6030 if (fIdxRegValid)
6031 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6032
6033 RTGCPTR const GCPtrOff = GCPtrMem;
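/* Add the segment base: outside long mode every segment base applies, while in long mode only FS and GS can contribute a non-zero base. */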
6034 if ( !fIsLongMode
6035 || iSegReg >= X86_SREG_FS)
6036 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6037 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6038
6039 /*
6040 * Validate effective address.
6041 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6042 */
6043 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6044 Assert(cbAccess > 0);
6045 if (fIsLongMode)
6046 {
6047 if (X86_IS_CANONICAL(GCPtrMem))
6048 {
6049 *pGCPtrMem = GCPtrMem;
6050 return VINF_SUCCESS;
6051 }
6052
6053 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6054 * "Data Limit Checks in 64-bit Mode". */
6055 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6056 vmxHCSetPendingXcptGP(pVCpu, 0);
6057 return VINF_HM_PENDING_XCPT;
6058 }
6059
6060 /*
6061 * This is a watered down version of iemMemApplySegment().
6062 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6063 * and segment CPL/DPL checks are skipped.
6064 */
6065 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6066 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6067 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6068
6069 /* Check if the segment is present and usable. */
6070 if ( pSel->Attr.n.u1Present
6071 && !pSel->Attr.n.u1Unusable)
6072 {
6073 Assert(pSel->Attr.n.u1DescType);
6074 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6075 {
6076 /* Check permissions for the data segment. */
6077 if ( enmMemAccess == VMXMEMACCESS_WRITE
6078 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6079 {
6080 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6081 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6082 return VINF_HM_PENDING_XCPT;
6083 }
6084
6085 /* Check limits if it's a normal data segment. */
6086 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6087 {
6088 if ( GCPtrFirst32 > pSel->u32Limit
6089 || GCPtrLast32 > pSel->u32Limit)
6090 {
6091 Log4Func(("Data segment limit exceeded. "
6092 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6093 GCPtrLast32, pSel->u32Limit));
6094 if (iSegReg == X86_SREG_SS)
6095 vmxHCSetPendingXcptSS(pVCpu, 0);
6096 else
6097 vmxHCSetPendingXcptGP(pVCpu, 0);
6098 return VINF_HM_PENDING_XCPT;
6099 }
6100 }
6101 else
6102 {
6103 /* Check limits if it's an expand-down data segment.
6104 Note! The upper boundary is defined by the B bit, not the G bit! */
6105 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6106 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6107 {
6108 Log4Func(("Expand-down data segment limit exceeded. "
6109 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6110 GCPtrLast32, pSel->u32Limit));
6111 if (iSegReg == X86_SREG_SS)
6112 vmxHCSetPendingXcptSS(pVCpu, 0);
6113 else
6114 vmxHCSetPendingXcptGP(pVCpu, 0);
6115 return VINF_HM_PENDING_XCPT;
6116 }
6117 }
6118 }
6119 else
6120 {
6121 /* Check permissions for the code segment. */
6122 if ( enmMemAccess == VMXMEMACCESS_WRITE
6123 || ( enmMemAccess == VMXMEMACCESS_READ
6124 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6125 {
6126 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6127 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6128 vmxHCSetPendingXcptGP(pVCpu, 0);
6129 return VINF_HM_PENDING_XCPT;
6130 }
6131
6132 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6133 if ( GCPtrFirst32 > pSel->u32Limit
6134 || GCPtrLast32 > pSel->u32Limit)
6135 {
6136 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6137 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6138 if (iSegReg == X86_SREG_SS)
6139 vmxHCSetPendingXcptSS(pVCpu, 0);
6140 else
6141 vmxHCSetPendingXcptGP(pVCpu, 0);
6142 return VINF_HM_PENDING_XCPT;
6143 }
6144 }
6145 }
6146 else
6147 {
6148 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6149 vmxHCSetPendingXcptGP(pVCpu, 0);
6150 return VINF_HM_PENDING_XCPT;
6151 }
6152
6153 *pGCPtrMem = GCPtrMem;
6154 return VINF_SUCCESS;
6155}
6156#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6157
6158
6159/**
6160 * VM-exit helper for LMSW.
6161 */
6162static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6163{
6164 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6165 AssertRCReturn(rc, rc);
6166
6167 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6168 AssertMsg( rcStrict == VINF_SUCCESS
6169 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6170
6171 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
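/* If IEM raised an exception it is now pending for injection; mark the context registers that exception delivery can modify as changed and continue. */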
6172 if (rcStrict == VINF_IEM_RAISED_XCPT)
6173 {
6174 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6175 rcStrict = VINF_SUCCESS;
6176 }
6177
6178 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6179 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6180 return rcStrict;
6181}
6182
6183
6184/**
6185 * VM-exit helper for CLTS.
6186 */
6187static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6188{
6189 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6190 AssertRCReturn(rc, rc);
6191
6192 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6193 AssertMsg( rcStrict == VINF_SUCCESS
6194 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6195
6196 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6197 if (rcStrict == VINF_IEM_RAISED_XCPT)
6198 {
6199 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6200 rcStrict = VINF_SUCCESS;
6201 }
6202
6203 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6204 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6205 return rcStrict;
6206}
6207
6208
6209/**
6210 * VM-exit helper for MOV from CRx (CRx read).
6211 */
6212static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6213{
6214 Assert(iCrReg < 16);
6215 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6216
6217 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6218 AssertRCReturn(rc, rc);
6219
6220 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6221 AssertMsg( rcStrict == VINF_SUCCESS
6222 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6223
6224 if (iGReg == X86_GREG_xSP)
6225 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6226 else
6227 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6228#ifdef VBOX_WITH_STATISTICS
6229 switch (iCrReg)
6230 {
6231 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6232 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6233 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6234 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6235 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6236 }
6237#endif
6238 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6239 return rcStrict;
6240}
6241
6242
6243/**
6244 * VM-exit helper for MOV to CRx (CRx write).
6245 */
6246static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6247{
6248 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6249
6250 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6251 AssertMsg( rcStrict == VINF_SUCCESS
6252 || rcStrict == VINF_IEM_RAISED_XCPT
6253 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6254
6255 switch (iCrReg)
6256 {
6257 case 0:
6258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6259 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6260 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6261 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6262 break;
6263
6264 case 2:
6265 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6266 /* Nothing to do here, CR2 is not part of the VMCS. */
6267 break;
6268
6269 case 3:
6270 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6271 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6272 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6273 break;
6274
6275 case 4:
6276 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6277 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6278#ifndef IN_NEM_DARWIN
6279 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6280 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6281#else
6282 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6283#endif
6284 break;
6285
6286 case 8:
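/* CR8 is the task-priority register; writes are reflected in the virtual APIC TPR. */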
6287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6288 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6289 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6290 break;
6291
6292 default:
6293 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6294 break;
6295 }
6296
6297 if (rcStrict == VINF_IEM_RAISED_XCPT)
6298 {
6299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6300 rcStrict = VINF_SUCCESS;
6301 }
6302 return rcStrict;
6303}
6304
6305
6306/**
6307 * VM-exit exception handler for \#PF (Page-fault exception).
6308 *
6309 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6310 */
6311static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6312{
6313 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6314 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6315
6316#ifndef IN_NEM_DARWIN
6317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6318 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6319 { /* likely */ }
6320 else
6321#endif
6322 {
6323#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6324 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6325#endif
6326 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6327 if (!pVmxTransient->fVectoringDoublePF)
6328 {
6329 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6330 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6331 }
6332 else
6333 {
6334 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6335 Assert(!pVmxTransient->fIsNestedGuest);
6336 vmxHCSetPendingXcptDF(pVCpu);
6337 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6338 }
6339 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6340 return VINF_SUCCESS;
6341 }
6342
6343 Assert(!pVmxTransient->fIsNestedGuest);
6344
6345 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6346 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6347 if (pVmxTransient->fVectoringPF)
6348 {
6349 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6350 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6351 }
6352
6353 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6354 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6355 AssertRCReturn(rc, rc);
6356
6357 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6358 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6359
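/* Hand the fault to PGM: it either resolves it (shadow page-table sync, MMIO and other access handlers) or returns VINF_EM_RAW_GUEST_TRAP when the fault must be reflected to the guest. */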
6360 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6361 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6362
6363 Log4Func(("#PF: rc=%Rrc\n", rc));
6364 if (rc == VINF_SUCCESS)
6365 {
6366 /*
6367 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6368 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6369 */
6370 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6371 TRPMResetTrap(pVCpu);
6372 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6373 return rc;
6374 }
6375
6376 if (rc == VINF_EM_RAW_GUEST_TRAP)
6377 {
6378 if (!pVmxTransient->fVectoringDoublePF)
6379 {
6380 /* It's a guest page fault and needs to be reflected to the guest. */
6381 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6382 TRPMResetTrap(pVCpu);
6383 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6384 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6385 uGstErrorCode, pVmxTransient->uExitQual);
6386 }
6387 else
6388 {
6389 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6390 TRPMResetTrap(pVCpu);
6391 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6392 vmxHCSetPendingXcptDF(pVCpu);
6393 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6394 }
6395
6396 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6397 return VINF_SUCCESS;
6398 }
6399
6400 TRPMResetTrap(pVCpu);
6401 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6402 return rc;
6403}
6404
6405
6406/**
6407 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6408 *
6409 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6410 */
6411static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6412{
6413 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6414 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6415
6416 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6417 AssertRCReturn(rc, rc);
6418
6419 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6420 {
6421 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6422 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6423
6424 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6425 * provides the VM-exit instruction length. If this causes problems later,
6426 * disassemble the instruction like it's done on AMD-V. */
6427 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6428 AssertRCReturn(rc2, rc2);
6429 return rc;
6430 }
6431
6432 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6433 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6434 return VINF_SUCCESS;
6435}
6436
6437
6438/**
6439 * VM-exit exception handler for \#BP (Breakpoint exception).
6440 *
6441 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6442 */
6443static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6444{
6445 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6446 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6447
6448 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6449 AssertRCReturn(rc, rc);
6450
6451 VBOXSTRICTRC rcStrict;
6452 if (!pVmxTransient->fIsNestedGuest)
6453 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6454 else
6455 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6456
6457 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6458 {
6459 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6460 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6461 rcStrict = VINF_SUCCESS;
6462 }
6463
6464 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6465 return rcStrict;
6466}
6467
6468
6469/**
6470 * VM-exit exception handler for \#AC (Alignment-check exception).
6471 *
6472 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6473 */
6474static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6475{
6476 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6477
6478 /*
6479 * Detect #ACs caused by the host having enabled split-lock detection.
6480 * Emulate such instructions.
6481 */
6482 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6483 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6484 AssertRCReturn(rc, rc);
6485 /** @todo detect split lock in cpu feature? */
6486 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6487 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6488 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6489 || CPUMGetGuestCPL(pVCpu) != 3
6490 /* 3. When EFLAGS.AC == 0 this can only be a split-lock case. */
6491 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6492 {
6493 /*
6494 * Check for debug/trace events and import state accordingly.
6495 */
6496 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6497 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6498 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6499#ifndef IN_NEM_DARWIN
6500 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6501#endif
6502 )
6503 {
6504 if (pVM->cCpus == 1)
6505 {
6506#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6507 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6508#else
6509 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6510#endif
6511 AssertRCReturn(rc, rc);
6512 }
6513 }
6514 else
6515 {
6516 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6517 AssertRCReturn(rc, rc);
6518
6519 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6520
6521 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6522 {
6523 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6524 if (rcStrict != VINF_SUCCESS)
6525 return rcStrict;
6526 }
6527 }
6528
6529 /*
6530 * Emulate the instruction.
6531 *
6532 * We have to ignore the LOCK prefix here as we must not retrigger the
6533 * detection on the host. This isn't all that satisfactory, though...
6534 */
6535 if (pVM->cCpus == 1)
6536 {
6537 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6538 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6539
6540 /** @todo For SMP configs we should do a rendezvous here. */
6541 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6542 if (rcStrict == VINF_SUCCESS)
6543#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6544 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6545 HM_CHANGED_GUEST_RIP
6546 | HM_CHANGED_GUEST_RFLAGS
6547 | HM_CHANGED_GUEST_GPRS_MASK
6548 | HM_CHANGED_GUEST_CS
6549 | HM_CHANGED_GUEST_SS);
6550#else
6551 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6552#endif
6553 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6554 {
6555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6556 rcStrict = VINF_SUCCESS;
6557 }
6558 return rcStrict;
6559 }
6560 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6561 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6562 return VINF_EM_EMULATE_SPLIT_LOCK;
6563 }
6564
6565 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6566 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6567 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6568
6569 /* Re-inject it. We'll detect any nesting before getting here. */
6570 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6571 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6572 return VINF_SUCCESS;
6573}
6574
6575
6576/**
6577 * VM-exit exception handler for \#DB (Debug exception).
6578 *
6579 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6580 */
6581static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6582{
6583 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6584 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6585
6586 /*
6587 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6588 */
6589 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6590
6591 /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
6592 uint64_t const uDR6 = X86_DR6_INIT_VAL
6593 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6594 | X86_DR6_BD | X86_DR6_BS));
6595
6596 int rc;
6597 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6598 if (!pVmxTransient->fIsNestedGuest)
6599 {
6600 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6601
6602 /*
6603 * Prevents stepping twice over the same instruction when the guest is stepping using
6604 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6605 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6606 */
6607 if ( rc == VINF_EM_DBG_STEPPED
6608 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6609 {
6610 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6611 rc = VINF_EM_RAW_GUEST_TRAP;
6612 }
6613 }
6614 else
6615 rc = VINF_EM_RAW_GUEST_TRAP;
6616 Log6Func(("rc=%Rrc\n", rc));
6617 if (rc == VINF_EM_RAW_GUEST_TRAP)
6618 {
6619 /*
6620 * The exception was for the guest. Update DR6, DR7.GD and
6621 * IA32_DEBUGCTL.LBR before forwarding it.
6622 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6623 */
6624#ifndef IN_NEM_DARWIN
6625 VMMRZCallRing3Disable(pVCpu);
6626 HM_DISABLE_PREEMPT(pVCpu);
6627
6628 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6629 pCtx->dr[6] |= uDR6;
6630 if (CPUMIsGuestDebugStateActive(pVCpu))
6631 ASMSetDR6(pCtx->dr[6]);
6632
6633 HM_RESTORE_PREEMPT();
6634 VMMRZCallRing3Enable(pVCpu);
6635#else
6636 /** @todo */
6637#endif
6638
6639 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6640 AssertRCReturn(rc, rc);
6641
6642 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6643 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6644
6645 /* Paranoia. */
6646 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6647 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6648
6649 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6650 AssertRC(rc);
6651
6652 /*
6653 * Raise #DB in the guest.
6654 *
6655 * It is important to reflect exactly what the VM-exit gave us (preserving the
6656 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6657 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6658 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6659 *
6660 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
6661 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6662 */
6663 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6664 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6665 return VINF_SUCCESS;
6666 }
6667
6668 /*
6669 * Not a guest trap, must be a hypervisor related debug event then.
6670 * Update DR6 in case someone is interested in it.
6671 */
6672 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6673 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6674 CPUMSetHyperDR6(pVCpu, uDR6);
6675
6676 return rc;
6677}
6678
6679
6680/**
6681 * Hacks its way around the lovely mesa driver's backdoor accesses.
6682 *
6683 * @sa hmR0SvmHandleMesaDrvGp.
6684 */
6685static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6686{
6687 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6688 RT_NOREF(pCtx);
6689
6690 /* For now we'll just skip the instruction. */
6691 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6692}
6693
6694
6695/**
6696 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6697 * backdoor logging w/o checking what it is running inside.
6698 *
6699 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6700 * backdoor port and magic numbers loaded in registers.
6701 *
6702 * @returns true if it is, false if it isn't.
6703 * @sa hmR0SvmIsMesaDrvGp.
6704 */
6705DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6706{
6707 /* 0xed: IN eAX,dx */
6708 uint8_t abInstr[1];
6709 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6710 return false;
6711
6712 /* Check that it is #GP(0). */
6713 if (pVmxTransient->uExitIntErrorCode != 0)
6714 return false;
6715
6716 /* Check magic and port. */
6717 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6718 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
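/* These are the VMware backdoor interface values the driver uses: magic 0x564d5868 ('VMXh') in EAX and I/O port 0x5658 ('VX') in DX. */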
6719 if (pCtx->rax != UINT32_C(0x564d5868))
6720 return false;
6721 if (pCtx->dx != UINT32_C(0x5658))
6722 return false;
6723
6724 /* Flat ring-3 CS. */
6725 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6726 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6727 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6728 if (pCtx->cs.Attr.n.u2Dpl != 3)
6729 return false;
6730 if (pCtx->cs.u64Base != 0)
6731 return false;
6732
6733 /* Check opcode. */
6734 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6735 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6736 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6737 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6738 if (RT_FAILURE(rc))
6739 return false;
6740 if (abInstr[0] != 0xed)
6741 return false;
6742
6743 return true;
6744}
6745
6746
6747/**
6748 * VM-exit exception handler for \#GP (General-protection exception).
6749 *
6750 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6751 */
6752static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6753{
6754 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6755 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6756
6757 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6758 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6759#ifndef IN_NEM_DARWIN
6760 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6761 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6762 { /* likely */ }
6763 else
6764#endif
6765 {
6766#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6767# ifndef IN_NEM_DARWIN
6768 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6769# else
6770 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6771# endif
6772#endif
6773 /*
6774 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6775 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6776 */
6777 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6778 AssertRCReturn(rc, rc);
6779 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6780 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6781
6782 if ( pVmxTransient->fIsNestedGuest
6783 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6784 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6785 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6786 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6787 else
6788 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6789 return rc;
6790 }
6791
6792#ifndef IN_NEM_DARWIN
6793 Assert(CPUMIsGuestInRealModeEx(pCtx));
6794 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6795 Assert(!pVmxTransient->fIsNestedGuest);
6796
6797 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6798 AssertRCReturn(rc, rc);
6799
6800 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6801 if (rcStrict == VINF_SUCCESS)
6802 {
6803 if (!CPUMIsGuestInRealModeEx(pCtx))
6804 {
6805 /*
6806 * The guest is no longer in real-mode, check if we can continue executing the
6807 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6808 */
6809 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6810 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6811 {
6812 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6814 }
6815 else
6816 {
6817 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6818 rcStrict = VINF_EM_RESCHEDULE;
6819 }
6820 }
6821 else
6822 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6823 }
6824 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6825 {
6826 rcStrict = VINF_SUCCESS;
6827 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6828 }
6829 return VBOXSTRICTRC_VAL(rcStrict);
6830#endif
6831}
6832
6833
6834/**
6835 * VM-exit exception handler for \#DE (Divide Error).
6836 *
6837 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6838 */
6839static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6840{
6841 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6842 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6843
6844 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6845 AssertRCReturn(rc, rc);
6846
6847 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
6848 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6849 {
6850 uint8_t cbInstr = 0;
6851 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6852 if (rc2 == VINF_SUCCESS)
6853 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6854 else if (rc2 == VERR_NOT_FOUND)
6855 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6856 else
6857 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6858 }
6859 else
6860 rcStrict = VINF_SUCCESS; /* Do nothing. */
6861
6862 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6863 if (RT_FAILURE(rcStrict))
6864 {
6865 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6866 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6867 rcStrict = VINF_SUCCESS;
6868 }
6869
6870 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6871 return VBOXSTRICTRC_VAL(rcStrict);
6872}
6873
6874
6875/**
6876 * VM-exit exception handler wrapper for all other exceptions that are not handled
6877 * by a specific handler.
6878 *
6879 * This simply re-injects the exception back into the VM without any special
6880 * processing.
6881 *
6882 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6883 */
6884static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6885{
6886 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6887
6888#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6889# ifndef IN_NEM_DARWIN
6890 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6891 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6892 ("uVector=%#x u32XcptBitmap=%#X32\n",
6893 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6894 NOREF(pVmcsInfo);
6895# endif
6896#endif
6897
6898 /*
6899 * Re-inject the exception into the guest. This cannot be a double-fault condition, which
6900 * would already have been handled while checking VM-exits due to event delivery.
6901 */
6902 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6903
6904#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6905 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6906 AssertRCReturn(rc, rc);
6907 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6908#endif
6909
6910#ifdef VBOX_WITH_STATISTICS
6911 switch (uVector)
6912 {
6913 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6914 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6915 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6916 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6917 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6918 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6919 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6920 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6921 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6922 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6923 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6924 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6925 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6926 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6927 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6928 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6929 default:
6930 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6931 break;
6932 }
6933#endif
6934
6935 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6936 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6937 NOREF(uVector);
6938
6939 /* Re-inject the original exception into the guest. */
6940 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6941 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6942 return VINF_SUCCESS;
6943}
6944
6945
6946/**
6947 * VM-exit exception handler for all exceptions (except NMIs!).
6948 *
6949 * @remarks This may be called for both guests and nested-guests. Take care to not
6950 * make assumptions and avoid doing anything that is not relevant when
6951 * executing a nested-guest (e.g., Mesa driver hacks).
6952 */
6953static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6954{
6955 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6956
6957 /*
6958 * If this VM-exit occurred while delivering an event through the guest IDT, take
6959 * action based on the return code and additional hints (e.g. for page-faults)
6960 * that will be updated in the VMX transient structure.
6961 */
6962 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6963 if (rcStrict == VINF_SUCCESS)
6964 {
6965 /*
6966 * If an exception caused a VM-exit due to delivery of an event, the original
6967 * event may have to be re-injected into the guest. We shall reinject it and
6968 * continue guest execution. However, page-fault is a complicated case and
6969 * needs additional processing done in vmxHCExitXcptPF().
6970 */
6971 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6972 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6973 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6974 || uVector == X86_XCPT_PF)
6975 {
6976 switch (uVector)
6977 {
6978 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6979 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6980 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6981 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6982 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6983 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6984 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
6985 default:
6986 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6987 }
6988 }
6989 /* else: inject pending event before resuming guest execution. */
6990 }
6991 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6992 {
6993 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6994 rcStrict = VINF_SUCCESS;
6995 }
6996
6997 return rcStrict;
6998}
6999/** @} */
7000
7001
7002/** @name VM-exit handlers.
7003 * @{
7004 */
7005/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7006/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7007/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7008
7009/**
7010 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7011 */
7012HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7013{
7014 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7015 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7016
7017#ifndef IN_NEM_DARWIN
7018 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7019 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7020 return VINF_SUCCESS;
7021 return VINF_EM_RAW_INTERRUPT;
7022#else
7023 return VINF_SUCCESS;
7024#endif
7025}
7026
7027
7028/**
7029 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7030 * VM-exit.
7031 */
7032HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7033{
7034 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7035 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7036
7037 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7038
7039 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7040 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7041 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7042
7043 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
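/* We never enable the "acknowledge interrupt on exit" VM-exit control, so a VM-exit due to an external interrupt cannot show up here with valid interruption information. */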
7044 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7045 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7046 NOREF(pVmcsInfo);
7047
7048 VBOXSTRICTRC rcStrict;
7049 switch (uExitIntType)
7050 {
7051#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7052 /*
7053 * Host physical NMIs:
7054 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7055 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7056 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7057 *
7058 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7059 * See Intel spec. 27.5.5 "Updating Non-Register State".
7060 */
7061 case VMX_EXIT_INT_INFO_TYPE_NMI:
7062 {
7063 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7064 break;
7065 }
7066#endif
7067
7068 /*
7069 * Privileged software exceptions (#DB from ICEBP),
7070 * Software exceptions (#BP and #OF),
7071 * Hardware exceptions:
7072 * Process the required exceptions and resume guest execution if possible.
7073 */
7074 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7075 Assert(uVector == X86_XCPT_DB);
7076 RT_FALL_THRU();
7077 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7078 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7079 RT_FALL_THRU();
7080 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7081 {
7082 NOREF(uVector);
7083 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7084 | HMVMX_READ_EXIT_INSTR_LEN
7085 | HMVMX_READ_IDT_VECTORING_INFO
7086 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7087 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7088 break;
7089 }
7090
7091 default:
7092 {
7093 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7094 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7095 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7096 break;
7097 }
7098 }
7099
7100 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7101 return rcStrict;
7102}
7103
7104
7105/**
7106 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7107 */
7108HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7109{
7110 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7111
7112 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is ready now. */
7113 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7114 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7115
7116 /* Evaluate and deliver pending events and resume guest execution. */
7117 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7118 return VINF_SUCCESS;
7119}
7120
7121
7122/**
7123 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7124 */
7125HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7126{
7127 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7128
7129 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7130 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7131 {
7132 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7133 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7134 }
7135
7136 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7137
7138 /*
7139 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7140 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7141 */
7142 uint32_t fIntrState;
7143 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7144 AssertRC(rc);
7145 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7146 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7147 {
7148 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7149 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7150
7151 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7152 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7153 AssertRC(rc);
7154 }
7155
7156 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is ready now. */
7157 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7158
7159 /* Evaluate and deliver pending events and resume guest execution. */
7160 return VINF_SUCCESS;
7161}
7162
7163
7164/**
7165 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7166 */
7167HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7168{
7169 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7170 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7171}
7172
7173
7174/**
7175 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7176 */
7177HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7178{
7179 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7180 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7181}
7182
7183
7184/**
7185 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7186 */
7187HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7188{
7189 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7190
7191 /*
7192 * Get the state we need and update the exit history entry.
7193 */
7194 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7195 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7196 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7197 AssertRCReturn(rc, rc);
7198
7199 VBOXSTRICTRC rcStrict;
7200 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7201 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7202 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
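    /* A non-NULL exit record means this exit is frequent (or flagged for probing) and is better handled
       via EMHistoryExec below; otherwise take the fast path and have IEM execute the decoded CPUID. */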
7203 if (!pExitRec)
7204 {
7205 /*
7206 * Regular CPUID instruction execution.
7207 */
7208 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7209 if (rcStrict == VINF_SUCCESS)
7210 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7211 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7212 {
7213 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7214 rcStrict = VINF_SUCCESS;
7215 }
7216 }
7217 else
7218 {
7219 /*
7220 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7221 */
7222 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7223 AssertRCReturn(rc2, rc2);
7224
7225 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7226 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7227
7228 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7230
7231 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7232 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7233 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7234 }
7235 return rcStrict;
7236}
7237
7238
7239/**
7240 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7241 */
7242HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7243{
7244 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7245
7246 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7247 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7248 AssertRCReturn(rc, rc);
7249
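    /* GETSEC #UDs when CR4.SMXE is clear, so this VM-exit is only expected with SMXE set; in that case
       simply hand the instruction to the interpreter (VINF_EM_RAW_EMULATE_INSTR). */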
7250 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7251 return VINF_EM_RAW_EMULATE_INSTR;
7252
7253 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7254 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7255}
7256
7257
7258/**
7259 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7260 */
7261HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7262{
7263 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7264
7265 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7266 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7267 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7268 AssertRCReturn(rc, rc);
7269
7270 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7271 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7272 {
7273 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7274 we must reset offsetting on VM-entry. See @bugref{6634}. */
7275 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7276 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7277 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7278 }
7279 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7280 {
7281 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7282 rcStrict = VINF_SUCCESS;
7283 }
7284 return rcStrict;
7285}
7286
7287
7288/**
7289 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7290 */
7291HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7292{
7293 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7294
7295 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7296 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7297 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7298 AssertRCReturn(rc, rc);
7299
7300 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7301 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7302 {
7303 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7304 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7305 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7306 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7307 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7308 }
7309 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7310 {
7311 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7312 rcStrict = VINF_SUCCESS;
7313 }
7314 return rcStrict;
7315}
7316
7317
7318/**
7319 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7320 */
7321HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7322{
7323 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7324
7325 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7326 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7327 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7328 AssertRCReturn(rc, rc);
7329
7330 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7331 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7332 if (RT_LIKELY(rc == VINF_SUCCESS))
7333 {
7334 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
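        /* RDPMC is a two-byte instruction (0F 33), hence the fixed instruction length asserted below. */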
7335 Assert(pVmxTransient->cbExitInstr == 2);
7336 }
7337 else
7338 {
7339 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7340 rc = VERR_EM_INTERPRETER;
7341 }
7342 return rc;
7343}
7344
7345
7346/**
7347 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7348 */
7349HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7350{
7351 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7352
7353 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7354 if (EMAreHypercallInstructionsEnabled(pVCpu))
7355 {
7356 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7357 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7358 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7359 AssertRCReturn(rc, rc);
7360
7361 /* Perform the hypercall. */
7362 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7363 if (rcStrict == VINF_SUCCESS)
7364 {
7365 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7366 AssertRCReturn(rc, rc);
7367 }
7368 else
7369 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7370 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7371 || RT_FAILURE(rcStrict));
7372
7373 /* If the hypercall changes anything other than guest's general-purpose registers,
7374 we would need to reload the guest changed bits here before VM-entry. */
7375 }
7376 else
7377 Log4Func(("Hypercalls not enabled\n"));
7378
7379 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7380 if (RT_FAILURE(rcStrict))
7381 {
7382 vmxHCSetPendingXcptUD(pVCpu);
7383 rcStrict = VINF_SUCCESS;
7384 }
7385
7386 return rcStrict;
7387}
7388
7389
7390/**
7391 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7392 */
7393HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7394{
7395 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7396#ifndef IN_NEM_DARWIN
7397 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7398#endif
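    /* INVLPG is normally only intercepted when nested paging isn't available (to keep the shadow page
       tables in sync) or while running the debug loop, as the assertion above checks. */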
7399
7400 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7401 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7402 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7403 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7404 AssertRCReturn(rc, rc);
7405
7406 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7407
7408 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7409 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7410 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7411 {
7412 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7413 rcStrict = VINF_SUCCESS;
7414 }
7415 else
7416 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7417 VBOXSTRICTRC_VAL(rcStrict)));
7418 return rcStrict;
7419}
7420
7421
7422/**
7423 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7424 */
7425HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7426{
7427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7428
7429 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7430 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7431 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7432 AssertRCReturn(rc, rc);
7433
7434 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7435 if (rcStrict == VINF_SUCCESS)
7436 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7437 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7438 {
7439 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7440 rcStrict = VINF_SUCCESS;
7441 }
7442
7443 return rcStrict;
7444}
7445
7446
7447/**
7448 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7449 */
7450HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7451{
7452 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7453
7454 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7455 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7456 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7457 AssertRCReturn(rc, rc);
7458
7459 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7460 if (RT_SUCCESS(rcStrict))
7461 {
7462 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7463 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7464 rcStrict = VINF_SUCCESS;
7465 }
7466
7467 return rcStrict;
7468}
7469
7470
7471/**
7472 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7473 * VM-exit.
7474 */
7475HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7476{
7477 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7478 return VINF_EM_RESET;
7479}
7480
7481
7482/**
7483 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7484 */
7485HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7486{
7487 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7488
7489 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7490 AssertRCReturn(rc, rc);
7491
7492 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7493 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7494 rc = VINF_SUCCESS;
7495 else
7496 rc = VINF_EM_HALT;
7497
7498 if (rc != VINF_SUCCESS)
7499 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7500 return rc;
7501}
7502
7503
7504#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7505/**
7506 * VM-exit handler for instructions that result in a \#UD exception delivered to
7507 * the guest.
7508 */
7509HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7510{
7511 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7512 vmxHCSetPendingXcptUD(pVCpu);
7513 return VINF_SUCCESS;
7514}
7515#endif
7516
7517
7518/**
7519 * VM-exit handler for expiry of the VMX-preemption timer.
7520 */
7521HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7522{
7523 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7524
7525 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7526 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7527 Log12(("vmxHCExitPreemptTimer:\n"));
7528
7529 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7530 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7531 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7532 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7533 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7534}
7535
7536
7537/**
7538 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7539 */
7540HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7541{
7542 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7543
7544 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7545 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7546 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7547 AssertRCReturn(rc, rc);
7548
7549 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7550 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7551 : HM_CHANGED_RAISED_XCPT_MASK);
7552
7553#ifndef IN_NEM_DARWIN
7554 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
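    /* If XSETBV changed whether the guest's XCR0 needs to be swapped in/out around VM-entry/exit,
       update the cached flag and re-select the start-VM function accordingly. */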
7555 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7556 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7557 {
7558 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7559 hmR0VmxUpdateStartVmFunction(pVCpu);
7560 }
7561#endif
7562
7563 return rcStrict;
7564}
7565
7566
7567/**
7568 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7569 */
7570HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7571{
7572 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7573
7574 /** @todo Enable the new code after finding a reliable guest test-case. */
7575#if 1
7576 return VERR_EM_INTERPRETER;
7577#else
7578 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7579 | HMVMX_READ_EXIT_INSTR_INFO
7580 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7581 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7582 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7583 AssertRCReturn(rc, rc);
7584
7585 /* Paranoia. Ensure this has a memory operand. */
7586 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7587
7588 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7589 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7590 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7591 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7592
7593 RTGCPTR GCPtrDesc;
7594 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7595
7596 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7597 GCPtrDesc, uType);
7598 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7599 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7600 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7601 {
7602 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7603 rcStrict = VINF_SUCCESS;
7604 }
7605 return rcStrict;
7606#endif
7607}
7608
7609
7610/**
7611 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7612 * VM-exit.
7613 */
7614HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7615{
7616 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7617 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7618 AssertRCReturn(rc, rc);
7619
7620 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7621 if (RT_FAILURE(rc))
7622 return rc;
7623
7624 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7625 NOREF(uInvalidReason);
7626
7627#ifdef VBOX_STRICT
7628 uint32_t fIntrState;
7629 uint64_t u64Val;
7630 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
7631 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7632 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7633
7634 Log4(("uInvalidReason %u\n", uInvalidReason));
7635 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7636 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7637 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7638
7639 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7640 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7641 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7642 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7643 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7644 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7645 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7646 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7647 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7648 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7649 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7650 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7651# ifndef IN_NEM_DARWIN
7652 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7653 {
7654 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7655 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7656 }
7657
7658 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7659# endif
7660#endif
7661
7662 return VERR_VMX_INVALID_GUEST_STATE;
7663}
7664
7665/**
7666 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7667 */
7668HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7669{
7670 /*
7671 * Cumulative notes of all recognized but unexpected VM-exits.
7672 *
7673 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7674 * nested-paging is used.
7675 *
7676 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7677 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7678 * this function (and thereby stop VM execution) for handling such instructions.
7679 *
7680 *
7681 * VMX_EXIT_INIT_SIGNAL:
7682 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7683 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7684 * VM-exits. However, we should not see INIT-signal VM-exits while executing a VM.
7685 *
7686 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery"
7687 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7688 * See Intel spec. "23.8 Restrictions on VMX operation".
7689 *
7690 * VMX_EXIT_SIPI:
7691 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7692 * activity state is used. We don't make use of it as our guests don't have direct
7693 * access to the host local APIC.
7694 *
7695 * See Intel spec. 25.3 "Other Causes of VM-exits".
7696 *
7697 * VMX_EXIT_IO_SMI:
7698 * VMX_EXIT_SMI:
7699 * This can only happen if we support dual-monitor treatment of SMI, which can be
7700 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7701 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7702 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7703 *
7704 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7705 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7706 *
7707 * VMX_EXIT_ERR_MSR_LOAD:
7708 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7709 * and typically indicate a bug in the hypervisor code. We thus cannot resume
7710 * execution.
7711 *
7712 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7713 *
7714 * VMX_EXIT_ERR_MACHINE_CHECK:
7715 * Machine check exceptions indicate a fatal/unrecoverable hardware condition,
7716 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7717 * abort-class #MC exception is raised. We thus cannot assume a reasonable
7718 * chance of continuing any sort of execution and we bail.
7719 *
7720 * See Intel spec. 15.1 "Machine-check Architecture".
7721 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7722 *
7723 * VMX_EXIT_PML_FULL:
7724 * VMX_EXIT_VIRTUALIZED_EOI:
7725 * VMX_EXIT_APIC_WRITE:
7726 * We do not currently support any of these features and thus they are all unexpected
7727 * VM-exits.
7728 *
7729 * VMX_EXIT_GDTR_IDTR_ACCESS:
7730 * VMX_EXIT_LDTR_TR_ACCESS:
7731 * VMX_EXIT_RDRAND:
7732 * VMX_EXIT_RSM:
7733 * VMX_EXIT_VMFUNC:
7734 * VMX_EXIT_ENCLS:
7735 * VMX_EXIT_RDSEED:
7736 * VMX_EXIT_XSAVES:
7737 * VMX_EXIT_XRSTORS:
7738 * VMX_EXIT_UMWAIT:
7739 * VMX_EXIT_TPAUSE:
7740 * VMX_EXIT_LOADIWKEY:
7741 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7742 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7743 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7744 *
7745 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7746 */
7747 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7748 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7749 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7750}
7751
7752
7753/**
7754 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7755 */
7756HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7757{
7758 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7759
7760 /** @todo Optimize this: We currently drag in the whole MSR state
7761 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7762 * MSRs required. That would require changes to IEM and possibly CPUM too.
7763 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7764 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7765 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7766 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
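    /* As with WRMSR below: the FS and GS base MSRs are not part of the all-MSRs mask, so the full
       segment state must be imported for those. */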
7767 switch (idMsr)
7768 {
7769 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7770 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7771 }
7772
7773 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7774 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7775 AssertRCReturn(rc, rc);
7776
7777 Log4Func(("ecx=%#RX32\n", idMsr));
7778
7779#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7780 Assert(!pVmxTransient->fIsNestedGuest);
7781 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7782 {
7783 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7784 && idMsr != MSR_K6_EFER)
7785 {
7786 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7787 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7788 }
7789 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7790 {
7791 Assert(pVmcsInfo->pvMsrBitmap);
7792 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7793 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7794 {
7795 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7796 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7797 }
7798 }
7799 }
7800#endif
7801
7802 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7803 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7804 if (rcStrict == VINF_SUCCESS)
7805 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7806 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7807 {
7808 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7809 rcStrict = VINF_SUCCESS;
7810 }
7811 else
7812 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7813 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7814
7815 return rcStrict;
7816}
7817
7818
7819/**
7820 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7821 */
7822HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7823{
7824 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7825
7826 /** @todo Optimize this: We currently drag in the whole MSR state
7827 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7828 * MSRs required. That would require changes to IEM and possibly CPUM too.
7829 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7830 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7831 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7832
7833 /*
7834 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7835 * Although we don't need to fetch the base as it will be overwritten shortly, when
7836 * loading the guest state we would also load the entire segment register, including
7837 * the limit and attributes, and thus we need to import them here.
7838 */
7839 switch (idMsr)
7840 {
7841 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7842 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7843 }
7844
7845 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7846 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7847 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7848 AssertRCReturn(rc, rc);
7849
7850 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7851
7852 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7853 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7854
7855 if (rcStrict == VINF_SUCCESS)
7856 {
7857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7858
7859 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7860 if ( idMsr == MSR_IA32_APICBASE
7861 || ( idMsr >= MSR_IA32_X2APIC_START
7862 && idMsr <= MSR_IA32_X2APIC_END))
7863 {
7864 /*
7865 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7866 * When full APIC register virtualization is implemented we'll have to make
7867 * sure APIC state is saved from the VMCS before IEM changes it.
7868 */
7869 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7870 }
7871 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7872 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7873 else if (idMsr == MSR_K6_EFER)
7874 {
7875 /*
7876 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7877 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7878 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7879 */
7880 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7881 }
7882
7883 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7884 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7885 {
7886 switch (idMsr)
7887 {
7888 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7889 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7890 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7891 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7892 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7893 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7894 default:
7895 {
7896#ifndef IN_NEM_DARWIN
7897 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7898 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7899 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7900 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7901#else
7902 AssertMsgFailed(("TODO\n"));
7903#endif
7904 break;
7905 }
7906 }
7907 }
7908#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7909 else
7910 {
7911 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7912 switch (idMsr)
7913 {
7914 case MSR_IA32_SYSENTER_CS:
7915 case MSR_IA32_SYSENTER_EIP:
7916 case MSR_IA32_SYSENTER_ESP:
7917 case MSR_K8_FS_BASE:
7918 case MSR_K8_GS_BASE:
7919 {
7920 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7921 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7922 }
7923
7924 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7925 default:
7926 {
7927 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7928 {
7929 /* EFER MSR writes are always intercepted. */
7930 if (idMsr != MSR_K6_EFER)
7931 {
7932 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7933 idMsr));
7934 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7935 }
7936 }
7937
7938 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7939 {
7940 Assert(pVmcsInfo->pvMsrBitmap);
7941 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7942 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7943 {
7944 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7945 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7946 }
7947 }
7948 break;
7949 }
7950 }
7951 }
7952#endif /* VBOX_STRICT */
7953 }
7954 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7955 {
7956 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7957 rcStrict = VINF_SUCCESS;
7958 }
7959 else
7960 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7961 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7962
7963 return rcStrict;
7964}
7965
7966
7967/**
7968 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7969 */
7970HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7971{
7972 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7973
7974 /** @todo The guest has likely hit a contended spinlock. We might want to
7975 * poke or schedule a different guest VCPU. */
7976 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7977 if (RT_SUCCESS(rc))
7978 return VINF_EM_RAW_INTERRUPT;
7979
7980 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7981 return rc;
7982}
7983
7984
7985/**
7986 * VM-exit handler for when the TPR value is lowered below the specified
7987 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7988 */
7989HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7990{
7991 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7992 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7993
7994 /*
7995 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7996 * We'll re-evaluate pending interrupts and inject them before the next VM
7997 * entry so we can just continue execution here.
7998 */
7999 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8000 return VINF_SUCCESS;
8001}
8002
8003
8004/**
8005 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8006 * VM-exit.
8007 *
8008 * @retval VINF_SUCCESS when guest execution can continue.
8009 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8010 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8011 * incompatible guest state for VMX execution (real-on-v86 case).
8012 */
8013HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8014{
8015 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8016 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8017
8018 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8019 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8020 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8021
8022 VBOXSTRICTRC rcStrict;
8023 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8024 uint64_t const uExitQual = pVmxTransient->uExitQual;
8025 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
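    /* The exit qualification identifies the kind of CR access that trapped: MOV to CRx, MOV from CRx,
       CLTS or LMSW; each is handled by its own case below. */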
8026 switch (uAccessType)
8027 {
8028 /*
8029 * MOV to CRx.
8030 */
8031 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8032 {
8033 /*
8034 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8035 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8036 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8037 * PAE PDPTEs as well.
8038 */
8039 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8040 AssertRCReturn(rc, rc);
8041
8042 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8043#ifndef IN_NEM_DARWIN
8044 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8045#endif
8046 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8047 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8048
8049 /*
8050 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8051 * - When nested paging isn't used.
8052 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8053 * - We are executing in the VM debug loop.
8054 */
8055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8056# ifndef IN_NEM_DARWIN
8057 Assert( iCrReg != 3
8058 || !VM_IS_VMX_NESTED_PAGING(pVM)
8059 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8060 || pVCpu->hmr0.s.fUsingDebugLoop);
8061# else
8062 Assert( iCrReg != 3
8063 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8064# endif
8065#endif
8066
8067 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8068 Assert( iCrReg != 8
8069 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8070
8071 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8072 AssertMsg( rcStrict == VINF_SUCCESS
8073 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8074
8075#ifndef IN_NEM_DARWIN
8076 /*
8077 * This is a kludge for handling switches back to real mode when we try to use
8078 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8079 * deal with special selector values, so we have to return to ring-3 and run
8080 * there till the selector values are V86 mode compatible.
8081 *
8082 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8083 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8084 * this function.
8085 */
8086 if ( iCrReg == 0
8087 && rcStrict == VINF_SUCCESS
8088 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8089 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8090 && (uOldCr0 & X86_CR0_PE)
8091 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8092 {
8093 /** @todo Check selectors rather than returning all the time. */
8094 Assert(!pVmxTransient->fIsNestedGuest);
8095 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8096 rcStrict = VINF_EM_RESCHEDULE_REM;
8097 }
8098#endif
8099
8100 break;
8101 }
8102
8103 /*
8104 * MOV from CRx.
8105 */
8106 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8107 {
8108 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8109 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8110
8111 /*
8112 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8113 * - When nested paging isn't used.
8114 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8115 * - We are executing in the VM debug loop.
8116 */
8117#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8118# ifndef IN_NEM_DARWIN
8119 Assert( iCrReg != 3
8120 || !VM_IS_VMX_NESTED_PAGING(pVM)
8121 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8122 || pVCpu->hmr0.s.fLeaveDone);
8123# else
8124 Assert( iCrReg != 3
8125 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8126# endif
8127#endif
8128
8129 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8130 Assert( iCrReg != 8
8131 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8132
8133 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8134 break;
8135 }
8136
8137 /*
8138 * CLTS (Clear Task-Switch Flag in CR0).
8139 */
8140 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8141 {
8142 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8143 break;
8144 }
8145
8146 /*
8147 * LMSW (Load Machine-Status Word into CR0).
8148 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8149 */
8150 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8151 {
8152 RTGCPTR GCPtrEffDst;
8153 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8154 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8155 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8156 if (fMemOperand)
8157 {
8158 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8159 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8160 }
8161 else
8162 GCPtrEffDst = NIL_RTGCPTR;
8163 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8164 break;
8165 }
8166
8167 default:
8168 {
8169 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8170 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8171 }
8172 }
8173
8174 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8175 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8176 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8177
8178 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8179 NOREF(pVM);
8180 return rcStrict;
8181}
8182
8183
8184/**
8185 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8186 * VM-exit.
8187 */
8188HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8189{
8190 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8191 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8192
8193 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8194 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8195 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8196 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8197 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8198 | CPUMCTX_EXTRN_EFER);
8199 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8200 AssertRCReturn(rc, rc);
8201
8202 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8203 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8204 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8205 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8206 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8207 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8208 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8209 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
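    /* The exit qualification encodes the access width as 0 (byte), 1 (word) or 3 (dword); value 2 is
       not defined, hence the zero holes at index 2 in the lookup tables below. */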
8210
8211 /*
8212 * Update exit history to see if this exit can be optimized.
8213 */
8214 VBOXSTRICTRC rcStrict;
8215 PCEMEXITREC pExitRec = NULL;
8216 if ( !fGstStepping
8217 && !fDbgStepping)
8218 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8219 !fIOString
8220 ? !fIOWrite
8221 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8222 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8223 : !fIOWrite
8224 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8225 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8226 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8227 if (!pExitRec)
8228 {
8229 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8230 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8231
8232 uint32_t const cbValue = s_aIOSizes[uIOSize];
8233 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8234 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8235 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8236 if (fIOString)
8237 {
8238 /*
8239 * INS/OUTS - I/O String instruction.
8240 *
8241 * Use instruction-information if available, otherwise fall back on
8242 * interpreting the instruction.
8243 */
8244 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8245 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8246 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8247 if (fInsOutsInfo)
8248 {
8249 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8250 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8251 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8252 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8253 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8254 if (fIOWrite)
8255 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8256 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8257 else
8258 {
8259 /*
8260 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8261 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8262 * See Intel Instruction spec. for "INS".
8263 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8264 */
8265 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8266 }
8267 }
8268 else
8269 rcStrict = IEMExecOne(pVCpu);
8270
8271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8272 fUpdateRipAlready = true;
8273 }
8274 else
8275 {
8276 /*
8277 * IN/OUT - I/O instruction.
8278 */
8279 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8280 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8281 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8282 if (fIOWrite)
8283 {
8284 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8285 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8286#ifndef IN_NEM_DARWIN
8287 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8288 && !pCtx->eflags.Bits.u1TF)
8289 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8290#endif
8291 }
8292 else
8293 {
8294 uint32_t u32Result = 0;
8295 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8296 if (IOM_SUCCESS(rcStrict))
8297 {
8298 /* Save result of I/O IN instr. in AL/AX/EAX. */
8299 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8300 }
8301#ifndef IN_NEM_DARWIN
8302 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8303 && !pCtx->eflags.Bits.u1TF)
8304 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8305#endif
8306 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8307 }
8308 }
8309
8310 if (IOM_SUCCESS(rcStrict))
8311 {
8312 if (!fUpdateRipAlready)
8313 {
8314 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8315 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8316 }
8317
8318 /*
8319 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8320 * while booting a Fedora 17 64-bit guest.
8321 *
8322 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8323 */
8324 if (fIOString)
8325 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8326
8327 /*
8328 * If any I/O breakpoints are armed, we need to check if one triggered
8329 * and take appropriate action.
8330 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8331 */
8332 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8333 AssertRCReturn(rc, rc);
8334
8335 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8336 * execution engines about whether hyper BPs and such are pending. */
8337 uint32_t const uDr7 = pCtx->dr[7];
8338 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8339 && X86_DR7_ANY_RW_IO(uDr7)
8340 && (pCtx->cr4 & X86_CR4_DE))
8341 || DBGFBpIsHwIoArmed(pVM)))
8342 {
8343 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8344
8345#ifndef IN_NEM_DARWIN
8346 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8347 VMMRZCallRing3Disable(pVCpu);
8348 HM_DISABLE_PREEMPT(pVCpu);
8349
8350 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8351
8352 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8353 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8354 {
8355 /* Raise #DB. */
8356 if (fIsGuestDbgActive)
8357 ASMSetDR6(pCtx->dr[6]);
8358 if (pCtx->dr[7] != uDr7)
8359 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8360
8361 vmxHCSetPendingXcptDB(pVCpu);
8362 }
8363 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8364 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8365 else if ( rcStrict2 != VINF_SUCCESS
8366 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8367 rcStrict = rcStrict2;
8368 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8369
8370 HM_RESTORE_PREEMPT();
8371 VMMRZCallRing3Enable(pVCpu);
8372#else
8373 /** @todo */
8374#endif
8375 }
8376 }
8377
8378#ifdef VBOX_STRICT
8379 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8380 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8381 Assert(!fIOWrite);
8382 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8383 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8384 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8385 Assert(fIOWrite);
8386 else
8387 {
8388# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8389 * statuses that the VMM device and some others may return. See
8390 * IOM_SUCCESS() for guidance. */
8391 AssertMsg( RT_FAILURE(rcStrict)
8392 || rcStrict == VINF_SUCCESS
8393 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8394 || rcStrict == VINF_EM_DBG_BREAKPOINT
8395 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8396 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8397# endif
8398 }
8399#endif
8400 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8401 }
8402 else
8403 {
8404 /*
8405 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8406 */
8407 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8408 AssertRCReturn(rc2, rc2);
8409 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8410 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8411 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8412 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8413 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8414 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8415
8416 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8417 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8418
8419 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8420 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8421 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8422 }
8423 return rcStrict;
8424}
8425
8426
8427/**
8428 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8429 * VM-exit.
8430 */
8431HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8432{
8433 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8434
8435 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8436 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8437 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8438 {
8439 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8440 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8441 {
8442 uint32_t uErrCode;
8443 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8444 {
8445 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8446 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8447 }
8448 else
8449 uErrCode = 0;
8450
8451 RTGCUINTPTR GCPtrFaultAddress;
8452 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8453 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8454 else
8455 GCPtrFaultAddress = 0;
8456
8457 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8458
8459 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8460 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8461
8462 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8463 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8464 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8465 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8466 }
8467 }
8468
8469 /* Fall back to the interpreter to emulate the task-switch. */
8470 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8471 return VERR_EM_INTERPRETER;
8472}
8473
8474
8475/**
8476 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8477 */
8478HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8479{
8480 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8481
8482 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8483 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8484 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8485 AssertRC(rc);
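    /* The monitor-trap-flag intercept is used for single-stepping; disarm it and report the completed
       step as VINF_EM_DBG_STEPPED. */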
8486 return VINF_EM_DBG_STEPPED;
8487}
8488
8489
8490/**
8491 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8492 */
8493HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8494{
8495 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8496 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8497
8498 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8499 | HMVMX_READ_EXIT_INSTR_LEN
8500 | HMVMX_READ_EXIT_INTERRUPTION_INFO
8501 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
8502 | HMVMX_READ_IDT_VECTORING_INFO
8503 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8504
8505 /*
8506 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8507 */
8508 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8509 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8510 {
8511 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8512 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8513 {
8514 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8515 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8516 }
8517 }
8518 else
8519 {
8520 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8521 return rcStrict;
8522 }
8523
8524 /* IOMR0MmioPhysHandler() below may call into IEM; save the necessary state. */
8525 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8526 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8527 AssertRCReturn(rc, rc);
8528
8529 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8530 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8531 switch (uAccessType)
8532 {
8533#ifndef IN_NEM_DARWIN
8534 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8535 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8536 {
8537 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8538 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8539 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8540
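 /* Reconstruct the physical address that was accessed: the guest's APIC base (page aligned)
    plus the access offset taken from the exit qualification. */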
8541 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8542 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8543 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8544 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8545 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8546
8547 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8548 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8549 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8550 if ( rcStrict == VINF_SUCCESS
8551 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8552 || rcStrict == VERR_PAGE_NOT_PRESENT)
8553 {
8554 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8555 | HM_CHANGED_GUEST_APIC_TPR);
8556 rcStrict = VINF_SUCCESS;
8557 }
8558 break;
8559 }
8560#else
8561 /** @todo */
8562#endif
8563
8564 default:
8565 {
8566 Log4Func(("uAccessType=%#x\n", uAccessType));
8567 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8568 break;
8569 }
8570 }
8571
8572 if (rcStrict != VINF_SUCCESS)
8573 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8574 return rcStrict;
8575}
8576
8577
8578/**
8579 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8580 * VM-exit.
8581 */
8582HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8583{
8584 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8585 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8586
8587 /*
8588 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8589 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8590 * must emulate the MOV DRx access.
8591 */
8592 if (!pVmxTransient->fIsNestedGuest)
8593 {
8594 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8595 if (pVmxTransient->fWasGuestDebugStateActive)
8596 {
8597 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8598 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8599 }
8600
8601 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8602 && !pVmxTransient->fWasHyperDebugStateActive)
8603 {
8604 Assert(!DBGFIsStepping(pVCpu));
8605 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8606
8607 /* Don't intercept MOV DRx any more. */
8608 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8610 AssertRC(rc);
8611
8612#ifndef IN_NEM_DARWIN
8613 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8614 VMMRZCallRing3Disable(pVCpu);
8615 HM_DISABLE_PREEMPT(pVCpu);
8616
8617 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8618 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8619 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8620
8621 HM_RESTORE_PREEMPT();
8622 VMMRZCallRing3Enable(pVCpu);
8623#else
8624 CPUMR3NemActivateGuestDebugState(pVCpu);
8625 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8626 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8627#endif
8628
8629#ifdef VBOX_WITH_STATISTICS
8630 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8631 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8632 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8633 else
8634 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8635#endif
8636 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8637 return VINF_SUCCESS;
8638 }
8639 }
8640
8641 /*
8642 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
8643 * The EFER MSR is always up-to-date.
8644 * Update the segment registers and DR7 from the CPU.
8645 */
8646 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8647 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8648 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8649 AssertRCReturn(rc, rc);
8650 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
8651
8652 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8653 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8654 {
8655 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8656 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8657 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8658 if (RT_SUCCESS(rc))
8659 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8660 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8661 }
8662 else
8663 {
8664 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8665 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8666 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8667 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8668 }
8669
8670 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8671 if (RT_SUCCESS(rc))
8672 {
8673 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8674 AssertRCReturn(rc2, rc2);
8675 return VINF_SUCCESS;
8676 }
8677 return rc;
8678}
8679
8680
8681/**
8682 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8683 * Conditional VM-exit.
8684 */
8685HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8686{
8687 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8688
8689#ifndef IN_NEM_DARWIN
8690 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8691
8692 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
8693 | HMVMX_READ_EXIT_INTERRUPTION_INFO
8694 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
8695 | HMVMX_READ_IDT_VECTORING_INFO
8696 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
8697 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
8698
8699 /*
8700 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8701 */
8702 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8703 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8704 {
8705 /*
8706 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8707 * instruction emulation to inject the original event. Otherwise, injecting the original event
8708 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8709 */
8710 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8711 { /* likely */ }
8712 else
8713 {
8714 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8715# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8716 /** @todo NSTVMX: Think about how this should be handled. */
8717 if (pVmxTransient->fIsNestedGuest)
8718 return VERR_VMX_IPE_3;
8719# endif
8720 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8721 }
8722 }
8723 else
8724 {
8725 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8726 return rcStrict;
8727 }
8728
8729 /*
8730 * Get sufficient state and update the exit history entry.
8731 */
8732 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8733 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8734 AssertRCReturn(rc, rc);
8735
8736 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8737 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8738 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8739 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8740 if (!pExitRec)
8741 {
8742 /*
8743 * If we succeed, resume guest execution.
8744 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8745 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8746 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8747 * weird case. See @bugref{6043}.
8748 */
8749 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8750 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8751/** @todo bird: We can probably just go straight to IOM here and assume that
8752 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8753 * well. However, we need to address the aliasing workarounds that
8754 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8755 *
8756 * Might also be interesting to see if we can get this done more or
8757 * less locklessly inside IOM. Need to consider the lookup table
8758 * updating and its use a bit more carefully first (or do all updates via
8759 * rendezvous) */
8760 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8761 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8762 if ( rcStrict == VINF_SUCCESS
8763 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8764 || rcStrict == VERR_PAGE_NOT_PRESENT)
8765 {
8766 /* Successfully handled MMIO operation. */
8767 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8768 | HM_CHANGED_GUEST_APIC_TPR);
8769 rcStrict = VINF_SUCCESS;
8770 }
8771 }
8772 else
8773 {
8774 /*
8775 * Frequent exit or something needing probing. Call EMHistoryExec.
8776 */
8777 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8778 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8779
8780 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8781 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8782
8783 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8784 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8785 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8786 }
8787 return rcStrict;
8788#else
8789 AssertFailed();
8790 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8791#endif
8792}
8793
8794
8795/**
8796 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8797 * VM-exit.
8798 */
8799HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8800{
8801 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8802#ifndef IN_NEM_DARWIN
8803 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8804
8805 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8806 | HMVMX_READ_EXIT_INSTR_LEN
8807 | HMVMX_READ_EXIT_INTERRUPTION_INFO
8808 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
8809 | HMVMX_READ_IDT_VECTORING_INFO
8810 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
8811 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
8812
8813 /*
8814 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8815 */
8816 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8817 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8818 {
8819 /*
8820 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8821 * we shall resolve the nested #PF and re-inject the original event.
8822 */
8823 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8824 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8825 }
8826 else
8827 {
8828 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8829 return rcStrict;
8830 }
8831
8832 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8833 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8834 AssertRCReturn(rc, rc);
8835
8836 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8837 uint64_t const uExitQual = pVmxTransient->uExitQual;
8838 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8839
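 /* Synthesize a #PF-style error code from the EPT violation qualification: an instruction fetch
    sets ID, a write access sets RW, and any permission bit present in the EPT entry sets P (the
    translation existed but did not permit the access). E.g. a guest write to a page mapped
    read-only in the EPT yields RW | P. */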
8840 RTGCUINT uErrorCode = 0;
8841 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8842 uErrorCode |= X86_TRAP_PF_ID;
8843 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8844 uErrorCode |= X86_TRAP_PF_RW;
8845 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8846 uErrorCode |= X86_TRAP_PF_P;
8847
8848 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8849 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8850
8851 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8852
8853 /*
8854 * Handle the pagefault trap for the nested shadow table.
8855 */
8856 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8857 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8858 TRPMResetTrap(pVCpu);
8859
8860 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8861 if ( rcStrict == VINF_SUCCESS
8862 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8863 || rcStrict == VERR_PAGE_NOT_PRESENT)
8864 {
8865 /* Successfully synced our nested page tables. */
8866 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8868 return VINF_SUCCESS;
8869 }
8870 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8871 return rcStrict;
8872
8873#else /* IN_NEM_DARWIN */
8874 PVM pVM = pVCpu->CTX_SUFF(pVM);
8875 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8876 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8877 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
8878 vmxHCImportGuestRip(pVCpu);
8879 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
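 /* CS and RIP are needed below both for logging and for the exit-history PC (cs.u64Base + rip). */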
8880
8881 /*
8882 * Ask PGM for information about the given GCPhys. We need to check if we're
8883 * out of sync first.
8884 */
8885 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8886 PGMPHYSNEMPAGEINFO Info;
8887 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8888 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8889 if (RT_SUCCESS(rc))
8890 {
8891 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8892 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8893 {
8894 if (State.fCanResume)
8895 {
8896 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8897 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8898 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8899 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8900 State.fDidSomething ? "" : " no-change"));
8901 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8902 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8903 return VINF_SUCCESS;
8904 }
8905 }
8906
8907 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8908 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8909 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8910 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8911 State.fDidSomething ? "" : " no-change"));
8912 }
8913 else
8914 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8915 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8916 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8917
8918 /*
8919 * Emulate the memory access, either access handler or special memory.
8920 */
8921 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8922 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8923 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8924 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8925 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8926
8927 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8928 AssertRCReturn(rc, rc);
8929
8930 VBOXSTRICTRC rcStrict;
8931 if (!pExitRec)
8932 rcStrict = IEMExecOne(pVCpu);
8933 else
8934 {
8935 /* Frequent access or probing. */
8936 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8937 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8938 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8939 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8940 }
8941
8942 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8943
8944 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8945 return rcStrict;
8946#endif /* IN_NEM_DARWIN */
8947}
8948
8949#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8950
8951/**
8952 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8953 */
8954HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8955{
8956 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8957
8958 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8959 | HMVMX_READ_EXIT_INSTR_INFO
8960 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8961 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8962 | CPUMCTX_EXTRN_HWVIRT
8963 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8964 AssertRCReturn(rc, rc);
8965
8966 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8967
8968 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
8969 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8970
8971 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8972 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8973 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8974 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8975 {
8976 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8977 rcStrict = VINF_SUCCESS;
8978 }
8979 return rcStrict;
8980}
8981
8982
8983/**
8984 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8985 */
8986HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8987{
8988 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8989
8990 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
8991 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8992 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8993 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8994 AssertRCReturn(rc, rc);
8995
8996 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8997
8998 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8999 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9000 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9001 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9002 {
9003 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
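 /* If IEM has entered VMX non-root mode, signal the caller (VINF_VMX_VMLAUNCH_VMRESUME) so it
    can switch over to nested-guest execution. */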
9004 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9005 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9006 }
9007 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9008 return rcStrict;
9009}
9010
9011
9012/**
9013 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9014 */
9015HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9016{
9017 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9018
9019 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9020 | HMVMX_READ_EXIT_INSTR_INFO
9021 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9022 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9023 | CPUMCTX_EXTRN_HWVIRT
9024 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9025 AssertRCReturn(rc, rc);
9026
9027 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9028
9029 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9030 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9031
9032 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9033 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9034 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9035 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9036 {
9037 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9038 rcStrict = VINF_SUCCESS;
9039 }
9040 return rcStrict;
9041}
9042
9043
9044/**
9045 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9046 */
9047HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9048{
9049 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9050
9051 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9052 | HMVMX_READ_EXIT_INSTR_INFO
9053 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9054 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9055 | CPUMCTX_EXTRN_HWVIRT
9056 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9057 AssertRCReturn(rc, rc);
9058
9059 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9060
9061 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9062 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9063
9064 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9065 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9066 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9067 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9068 {
9069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9070 rcStrict = VINF_SUCCESS;
9071 }
9072 return rcStrict;
9073}
9074
9075
9076/**
9077 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9078 */
9079HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9080{
9081 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9082
9083 /*
9084 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9085 * thus might not need to import the shadow VMCS state, but it's safer to do so just
9086 * in case code elsewhere dares look at unsynced VMCS fields.
9087 */
9088 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9089 | HMVMX_READ_EXIT_INSTR_INFO
9090 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9091 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9092 | CPUMCTX_EXTRN_HWVIRT
9093 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9094 AssertRCReturn(rc, rc);
9095
9096 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9097
9098 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9099 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9100 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9101
9102 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9103 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9104 {
9105 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9106
9107# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9108 /* Try for exit optimization. This is on the following instruction
9109 because it would be a waste of time to have to reinterpret the
9110 already decoded vmread instruction. */
9111 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9112 if (pExitRec)
9113 {
9114 /* Frequent access or probing. */
9115 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9116 AssertRCReturn(rc, rc);
9117
9118 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9119 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9120 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9121 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9122 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9123 }
9124# endif
9125 }
9126 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9127 {
9128 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9129 rcStrict = VINF_SUCCESS;
9130 }
9131 return rcStrict;
9132}
9133
9134
9135/**
9136 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9137 */
9138HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9139{
9140 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9141
9142 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
9143 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9144 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9145 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9146 AssertRCReturn(rc, rc);
9147
9148 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9149
9150 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9151 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9152 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9153 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9154 {
9155 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9156 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9157 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9158 }
9159 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9160 return rcStrict;
9161}
9162
9163
9164/**
9165 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9166 */
9167HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9168{
9169 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9170
9171 /*
9172 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9173 * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9174 * re-loading of the entire shadow VMCS, so we should save the entire shadow VMCS here.
9175 */
9176 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9177 | HMVMX_READ_EXIT_INSTR_INFO
9178 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9179 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9180 | CPUMCTX_EXTRN_HWVIRT
9181 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9182 AssertRCReturn(rc, rc);
9183
9184 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9185
9186 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9187 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9188 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9189
9190 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9191 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9192 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9193 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9194 {
9195 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9196 rcStrict = VINF_SUCCESS;
9197 }
9198 return rcStrict;
9199}
9200
9201
9202/**
9203 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9204 */
9205HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9206{
9207 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9208
9209 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9210 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9211 | CPUMCTX_EXTRN_HWVIRT
9212 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9213 AssertRCReturn(rc, rc);
9214
9215 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9216
9217 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9218 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9219 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9220 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9221 {
9222 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9223 rcStrict = VINF_SUCCESS;
9224 }
9225 return rcStrict;
9226}
9227
9228
9229/**
9230 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9231 */
9232HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9233{
9234 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9235
9236 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9237 | HMVMX_READ_EXIT_INSTR_INFO
9238 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9239 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9240 | CPUMCTX_EXTRN_HWVIRT
9241 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9242 AssertRCReturn(rc, rc);
9243
9244 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9245
9246 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9247 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9248
9249 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9250 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9251 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9252 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9253 {
9254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9255 rcStrict = VINF_SUCCESS;
9256 }
9257 return rcStrict;
9258}
9259
9260
9261/**
9262 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9263 */
9264HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9265{
9266 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9267
9268 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9269 | HMVMX_READ_EXIT_INSTR_INFO
9270 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9271 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9272 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9273 AssertRCReturn(rc, rc);
9274
9275 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9276
9277 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9278 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9279
9280 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9281 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9282 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9283 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9284 {
9285 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9286 rcStrict = VINF_SUCCESS;
9287 }
9288 return rcStrict;
9289}
9290
9291
9292# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9293/**
9294 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9295 */
9296HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9297{
9298 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9299
9300 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9301 | HMVMX_READ_EXIT_INSTR_INFO
9302 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9303 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9304 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9305 AssertRCReturn(rc, rc);
9306
9307 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9308
9309 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9310 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9311
9312 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9313 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9314 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9315 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9316 {
9317 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9318 rcStrict = VINF_SUCCESS;
9319 }
9320 return rcStrict;
9321}
9322# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9323#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9324/** @} */
9325
9326
9327#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9328/** @name Nested-guest VM-exit handlers.
9329 * @{
9330 */
9331/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9332/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9333/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9334
9335/**
9336 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9337 * Conditional VM-exit.
9338 */
9339HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9340{
9341 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9342
9343 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9344
9345 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9346 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9347 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9348
9349 switch (uExitIntType)
9350 {
9351# ifndef IN_NEM_DARWIN
9352 /*
9353 * Physical NMIs:
9354 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9355 */
9356 case VMX_EXIT_INT_INFO_TYPE_NMI:
9357 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9358# endif
9359
9360 /*
9361 * Hardware exceptions,
9362 * Software exceptions,
9363 * Privileged software exceptions:
9364 * Figure out if the exception must be delivered to the guest or the nested-guest.
9365 */
9366 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9367 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9368 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9369 {
9370 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9371 | HMVMX_READ_EXIT_INSTR_LEN
9372 | HMVMX_READ_IDT_VECTORING_INFO
9373 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9374
9375 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9376 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9377 {
9378 /* Exit qualification is required for debug and page-fault exceptions. */
9379 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9380
9381 /*
9382 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9383 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9384 * length. However, if delivery of a software interrupt, software exception or privileged
9385 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9386 */
9387 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9388 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9389 pVmxTransient->uExitIntErrorCode,
9390 pVmxTransient->uIdtVectoringInfo,
9391 pVmxTransient->uIdtVectoringErrorCode);
9392#ifdef DEBUG_ramshankar
9393 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9394 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9395 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9396 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9397 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9398 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9399#endif
9400 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9401 }
9402
9403 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9404 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9405 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9406 }
9407
9408 /*
9409 * Software interrupts:
9410 * VM-exits cannot be caused by software interrupts.
9411 *
9412 * External interrupts:
9413 * This should only happen when "acknowledge external interrupts on VM-exit"
9414 * control is set. However, we never set this when executing a guest or
9415 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9416 * the guest.
9417 */
9418 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9419 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9420 default:
9421 {
9422 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9423 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9424 }
9425 }
9426}
9427
9428
9429/**
9430 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9431 * Unconditional VM-exit.
9432 */
9433HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9434{
9435 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9436 return IEMExecVmxVmexitTripleFault(pVCpu);
9437}
9438
9439
9440/**
9441 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9442 */
9443HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9444{
9445 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9446
9447 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9448 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9449 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9450}
9451
9452
9453/**
9454 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9455 */
9456HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9457{
9458 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9459
9460 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9461 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9462 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9463}
9464
9465
9466/**
9467 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9468 * Unconditional VM-exit.
9469 */
9470HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9471{
9472 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9473
9474 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9475 | HMVMX_READ_EXIT_INSTR_LEN
9476 | HMVMX_READ_IDT_VECTORING_INFO
9477 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9478
9479 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9480 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
9481 pVmxTransient->uIdtVectoringErrorCode);
9482 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9483}
9484
9485
9486/**
9487 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9488 */
9489HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9490{
9491 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9492
9493 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9494 {
9495 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9496 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9497 }
9498 return vmxHCExitHlt(pVCpu, pVmxTransient);
9499}
9500
9501
9502/**
9503 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9504 */
9505HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9506{
9507 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9508
9509 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9510 {
9511 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9512 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9513 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9514 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9515 }
9516 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9517}
9518
9519
9520/**
9521 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9522 */
9523HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9524{
9525 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9526
9527 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9528 {
9529 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9530 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9531 }
9532 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9533}
9534
9535
9536/**
9537 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9538 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9539 */
9540HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9541{
9542 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9543
9544 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9545 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9546
9547 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
9548
9549 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9550 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9551 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9552
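 /* When the guest is not in long mode, only the lower 32 bits of the register operand hold the
    VMCS field encoding; EFER must be available to determine the mode, hence the assertion and
    the masking below. */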
9553 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9554 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9555 u64VmcsField &= UINT64_C(0xffffffff);
9556
9557 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9558 {
9559 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9560 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9561 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9562 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9563 }
9564
9565 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9566 return vmxHCExitVmread(pVCpu, pVmxTransient);
9567 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9568}
9569
9570
9571/**
9572 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9573 */
9574HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9575{
9576 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9577
9578 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9579 {
9580 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9581 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9582 }
9583
9584 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9585}
9586
9587
9588/**
9589 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9590 * Conditional VM-exit.
9591 */
9592HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9593{
9594 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9595
9596 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9597 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9598
9599 VBOXSTRICTRC rcStrict;
9600 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9601 switch (uAccessType)
9602 {
9603 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9604 {
9605 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9606 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9607 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9608 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9609
9610 bool fIntercept;
9611 switch (iCrReg)
9612 {
9613 case 0:
9614 case 4:
9615 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9616 break;
9617
9618 case 3:
9619 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9620 break;
9621
9622 case 8:
9623 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9624 break;
9625
9626 default:
9627 fIntercept = false;
9628 break;
9629 }
9630 if (fIntercept)
9631 {
9632 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9633 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9634 }
9635 else
9636 {
9637 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9638 AssertRCReturn(rc, rc);
9639 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9640 }
9641 break;
9642 }
9643
9644 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9645 {
9646 /*
9647 * CR0/CR4 reads do not cause VM-exits; the read-shadow is used (subject to masking).
9648 * CR2 reads do not cause a VM-exit.
9649 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9650 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9651 */
9652 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9653 if ( iCrReg == 3
9654 || iCrReg == 8)
9655 {
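 /* Table indexed by CR number; only CR3 and CR8 reads get here (see the check above), and they
    map to their respective store-exiting controls. */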
9656 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9657 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9658 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9659 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9660 {
9661 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9662 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9663 }
9664 else
9665 {
9666 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9667 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9668 }
9669 }
9670 else
9671 {
9672 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9673 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9674 }
9675 break;
9676 }
9677
9678 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9679 {
9680 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9681 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9682 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
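 /* CLTS only causes a VM-exit when the nested hypervisor owns CR0.TS (guest/host mask bit set)
    and the read shadow has TS set; otherwise handle it as an ordinary CLTS. */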
9683 if ( (uGstHostMask & X86_CR0_TS)
9684 && (uReadShadow & X86_CR0_TS))
9685 {
9686 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9687 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9688 }
9689 else
9690 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9691 break;
9692 }
9693
9694 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9695 {
9696 RTGCPTR GCPtrEffDst;
9697 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9698 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9699 if (fMemOperand)
9700 {
9701 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
9702 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9703 }
9704 else
9705 GCPtrEffDst = NIL_RTGCPTR;
9706
9707 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9708 {
9709 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9710 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9711 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9712 }
9713 else
9714 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9715 break;
9716 }
9717
9718 default:
9719 {
9720 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9721 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9722 }
9723 }
9724
9725 if (rcStrict == VINF_IEM_RAISED_XCPT)
9726 {
9727 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9728 rcStrict = VINF_SUCCESS;
9729 }
9730 return rcStrict;
9731}
9732
9733
9734/**
9735 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9736 * Conditional VM-exit.
9737 */
9738HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9739{
9740 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9741
9742 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9743 {
9744 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9745 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9746 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9747 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9748 }
9749 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9750}
9751
9752
9753/**
9754 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9755 * Conditional VM-exit.
9756 */
9757HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9758{
9759 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9760
9761 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9762
9763 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9764 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9765 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9766
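 /* The exit qualification encodes the access width as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes);
    the value 2 is not defined, which is what the assertion above guards against (and why the
    table below has a 0 placeholder). */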
9767 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9768 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9769 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9770 {
9771 /*
9772 * IN/OUT instruction:
9773 * - Provides VM-exit instruction length.
9774 *
9775 * INS/OUTS instruction:
9776 * - Provides VM-exit instruction length.
9777 * - Provides Guest-linear address.
9778 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9779 */
9780 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9781 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9782
9783 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9784 pVmxTransient->ExitInstrInfo.u = 0;
9785 pVmxTransient->uGuestLinearAddr = 0;
9786
9787 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9788 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9789 if (fIOString)
9790 {
9791 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
9792 if (fVmxInsOutsInfo)
9793 {
9794 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9795 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
9796 }
9797 }
9798
9799 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
9800 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9801 }
9802 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9803}
9804
9805
9806/**
9807 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9808 */
9809HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9810{
9811 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9812
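 /* Determine whether the nested hypervisor wants to see this RDMSR: consult its MSR bitmap if
    it uses one, otherwise every MSR read causes a VM-exit to it. */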
9813 uint32_t fMsrpm;
9814 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9815 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9816 else
9817 fMsrpm = VMXMSRPM_EXIT_RD;
9818
9819 if (fMsrpm & VMXMSRPM_EXIT_RD)
9820 {
9821 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9822 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9823 }
9824 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9825}
9826
9827
9828/**
9829 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9830 */
9831HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9832{
9833 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9834
9835 uint32_t fMsrpm;
9836 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9837 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9838 else
9839 fMsrpm = VMXMSRPM_EXIT_WR;
9840
9841 if (fMsrpm & VMXMSRPM_EXIT_WR)
9842 {
9843 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9844 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9845 }
9846 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9847}
9848
9849
9850/**
9851 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9852 */
9853HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9854{
9855 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9856
9857 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9858 {
9859 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9860 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9861 }
9862 return vmxHCExitMwait(pVCpu, pVmxTransient);
9863}
9864
9865
9866/**
9867 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9868 * VM-exit.
9869 */
9870HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9871{
9872 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9873
9874 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9875 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
9876 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
9877 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9878}
9879
9880
9881/**
9882 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9883 */
9884HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9885{
9886 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9887
9888 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9889 {
9890 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9891 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9892 }
9893 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9894}
9895
9896
9897/**
9898 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9899 */
9900HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9901{
9902 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9903
9904 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9905 * PAUSE when executing a nested-guest? If it does not, we would not need
9906 * to check for the intercepts here. Just call VM-exit... */
9907
9908 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9909 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9910 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9911 {
9912 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9913 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9914 }
9915 return vmxHCExitPause(pVCpu, pVmxTransient);
9916}
9917
9918
9919/**
9920 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9921 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9922 */
9923HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9924{
9925 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9926
9927 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9928 {
9929 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
9930 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
9931 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9932 }
9933 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9934}
9935
9936
9937/**
9938 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9939 * VM-exit.
9940 */
9941HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9942{
9943 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9944
9945 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9946 | HMVMX_READ_EXIT_INSTR_LEN
9947 | HMVMX_READ_IDT_VECTORING_INFO
9948 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9949
9950 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9951
9952 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
9953 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
9954
9955 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9956 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
9957 pVmxTransient->uIdtVectoringErrorCode);
9958 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
9959}
9960
9961
9962/**
9963 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
9964 * Conditional VM-exit.
9965 */
9966HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9967{
9968 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9969
9970 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
9971 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9972 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9973}
9974
9975
9976/**
9977 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
9978 * Conditional VM-exit.
9979 */
9980HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9981{
9982 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9983
9984 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
9985 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9986 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9987}
9988
9989
9990/**
9991 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
9992 */
9993HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9994{
9995 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9996
9997 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9998 {
9999 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10000 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10001 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10002 }
10003 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10004}
10005
10006
10007/**
10008 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10009 */
10010HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10011{
10012 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10013
10014 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10015 {
10016 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10017 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10018 }
10019 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10020}
10021
10022
10023/**
10024 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10025 */
10026HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10027{
10028 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10029
10030 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10031 {
10032 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10033 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10034 | HMVMX_READ_EXIT_INSTR_INFO
10035 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10036 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10037 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10038 }
10039 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10040}
10041
10042
10043/**
10044 * Nested-guest VM-exit handler for invalid-guest state
10045 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10046 */
10047HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10048{
10049 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10050
10051 /*
10052 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10053      * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10054      * Handle it as if the outer guest were in an invalid guest state.
10055 *
10056 * When the fast path is implemented, this should be changed to cause the corresponding
10057 * nested-guest VM-exit.
10058 */
10059 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10060}
10061
10062
10063/**
10064 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10065 * and only provide the instruction length.
10066 *
10067 * Unconditional VM-exit.
10068 */
10069HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10070{
10071 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10072
10073#ifdef VBOX_STRICT
10074 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10075 switch (pVmxTransient->uExitReason)
10076 {
10077 case VMX_EXIT_ENCLS:
10078 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10079 break;
10080
10081 case VMX_EXIT_VMFUNC:
10082 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10083 break;
10084 }
10085#endif
10086
10087 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10088 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10089}
10090
10091
10092/**
10093 * Nested-guest VM-exit handler for instructions that provide instruction length as
10094 * well as more information.
10095 *
10096 * Unconditional VM-exit.
10097 */
10098HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10099{
10100 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10101
10102# ifdef VBOX_STRICT
10103 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10104 switch (pVmxTransient->uExitReason)
10105 {
10106 case VMX_EXIT_GDTR_IDTR_ACCESS:
10107 case VMX_EXIT_LDTR_TR_ACCESS:
10108 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10109 break;
10110
10111 case VMX_EXIT_RDRAND:
10112 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10113 break;
10114
10115 case VMX_EXIT_RDSEED:
10116 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10117 break;
10118
10119 case VMX_EXIT_XSAVES:
10120 case VMX_EXIT_XRSTORS:
10121 /** @todo NSTVMX: Verify XSS-bitmap. */
10122 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10123 break;
10124
10125 case VMX_EXIT_UMWAIT:
10126 case VMX_EXIT_TPAUSE:
10127 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10128 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10129 break;
10130
10131 case VMX_EXIT_LOADIWKEY:
10132 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10133 break;
10134 }
10135# endif
10136
10137 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10138 | HMVMX_READ_EXIT_INSTR_LEN
10139 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10140 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10141 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10142}
10143
10144# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10145
10146/**
10147 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10148 * Conditional VM-exit.
10149 */
10150HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10151{
10152 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10153 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10154
10155 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10156 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10157 {
10158 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10159 AssertRCReturn(rc, rc);
10160
10161 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10162 | HMVMX_READ_EXIT_INSTR_LEN
10163 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10164 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10165 | HMVMX_READ_IDT_VECTORING_INFO
10166 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10167 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10168
10169 /*
10170          * If it's our VMEXIT, we're responsible for re-injecting any event whose delivery
10171          * might have triggered this VMEXIT.  If we forward the problem to the inner VMM,
10172          * it becomes that VMM's problem to deal with, and we'll clear the recovered event.
10173 */
10174 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10175 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10176 { /*likely*/ }
10177 else
10178 {
10179 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10180 return rcStrict;
10181 }
10182 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10183
10184 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10185 uint64_t const uExitQual = pVmxTransient->uExitQual;
10186
10187 RTGCPTR GCPtrNestedFault;
10188 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10189 if (fIsLinearAddrValid)
10190 {
10191 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10192 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10193 }
10194 else
10195 GCPtrNestedFault = 0;
10196
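        /* Derive a page-fault style error code from the EPT violation qualification: instruction
           fetch, write access, and whether the EPT entry granted any permission at all. */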
10197 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10198 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10199 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10200 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10201 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10202
10203 PGMPTWALK Walk;
10204 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10205 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10206 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10207 &Walk);
10208 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10209 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10210 if (RT_SUCCESS(rcStrict))
10211 return rcStrict;
10212
10213 if (fClearEventOnForward)
10214 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10215
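        /* PGM couldn't handle the nested fault itself, so reflect it to the nested-guest
           hypervisor as an EPT violation or EPT misconfiguration VM-exit. */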
10216 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10217 pVmxTransient->uIdtVectoringErrorCode);
10218 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10219 {
10220 VMXVEXITINFO const ExitInfo
10221 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10222 pVmxTransient->uExitQual,
10223 pVmxTransient->cbExitInstr,
10224 pVmxTransient->uGuestLinearAddr,
10225 pVmxTransient->uGuestPhysicalAddr);
10226 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10227 }
10228
10229 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10230 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10231 }
10232
10233 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10234}
10235
10236
10237/**
10238 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10239 * Conditional VM-exit.
10240 */
10241HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10242{
10243 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10244 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10245
10246 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10247 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10248 {
10249 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_ALL);
10250 AssertRCReturn(rc, rc);
10251
10252 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10253
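        /* Let PGM walk the nested paging structures for the faulting guest-physical address;
           a successful return is not expected here (see the assertion below). */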
10254 PGMPTWALK Walk;
10255 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10256 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10257 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10258 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10259 0 /* GCPtrNestedFault */, &Walk);
10260 if (RT_SUCCESS(rcStrict))
10261 {
10262 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10263 return rcStrict;
10264 }
10265
10266 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10267 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10268 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10269
10270 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10271 pVmxTransient->uIdtVectoringErrorCode);
10272 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10273 }
10274
10275 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10276}
10277
10278# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10279
10280/** @} */
10281#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10282
10283
10284/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10285 * probes.
10286 *
10287 * The following few functions and the associated structure contain the bloat
10288 * necessary for providing detailed debug events and dtrace probes as well as
10289 * reliable host side single stepping. This works on the principle of
10290 * "subclassing" the normal execution loop and workers. We replace the loop
10291 * method completely and override selected helpers to add necessary adjustments
10292 * to their core operation.
10293 *
10294 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10295 * any performance for debug and analysis features.
10296 *
10297 * @{
10298 */
10299
10300/**
10301 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10302 * in the debug run loop.
10303 */
10304typedef struct VMXRUNDBGSTATE
10305{
10306 /** The RIP we started executing at. This is for detecting that we stepped. */
10307 uint64_t uRipStart;
10308 /** The CS we started executing with. */
10309 uint16_t uCsStart;
10310
10311 /** Whether we've actually modified the 1st execution control field. */
10312 bool fModifiedProcCtls : 1;
10313 /** Whether we've actually modified the 2nd execution control field. */
10314 bool fModifiedProcCtls2 : 1;
10315 /** Whether we've actually modified the exception bitmap. */
10316 bool fModifiedXcptBitmap : 1;
10317
10318     /** We desire the CR0 mask to be cleared. */
10319 bool fClearCr0Mask : 1;
10320     /** We desire the CR4 mask to be cleared. */
10321 bool fClearCr4Mask : 1;
10322 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10323 uint32_t fCpe1Extra;
10324 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10325 uint32_t fCpe1Unwanted;
10326 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10327 uint32_t fCpe2Extra;
10328 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10329 uint32_t bmXcptExtra;
10330 /** The sequence number of the Dtrace provider settings the state was
10331 * configured against. */
10332 uint32_t uDtraceSettingsSeqNo;
10333 /** VM-exits to check (one bit per VM-exit). */
10334 uint32_t bmExitsToCheck[3];
10335
10336 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10337 uint32_t fProcCtlsInitial;
10338 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10339 uint32_t fProcCtls2Initial;
10340 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10341 uint32_t bmXcptInitial;
10342} VMXRUNDBGSTATE;
10343AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10344typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10345
10346
10347/**
10348 * Initializes the VMXRUNDBGSTATE structure.
10349 *
10350 * @param pVCpu The cross context virtual CPU structure of the
10351 * calling EMT.
10352 * @param pVmxTransient The VMX-transient structure.
10353 * @param pDbgState The debug state to initialize.
10354 */
10355static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10356{
10357 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10358 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10359
10360 pDbgState->fModifiedProcCtls = false;
10361 pDbgState->fModifiedProcCtls2 = false;
10362 pDbgState->fModifiedXcptBitmap = false;
10363 pDbgState->fClearCr0Mask = false;
10364 pDbgState->fClearCr4Mask = false;
10365 pDbgState->fCpe1Extra = 0;
10366 pDbgState->fCpe1Unwanted = 0;
10367 pDbgState->fCpe2Extra = 0;
10368 pDbgState->bmXcptExtra = 0;
10369 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10370 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10371 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10372}
10373
10374
10375/**
10376 * Updates the VMCS fields with changes requested by @a pDbgState.
10377 *
10378 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10379 * immediately before executing guest code, i.e. when interrupts are disabled.
10380 * We don't check status codes here as we cannot easily assert or return in the
10381 * latter case.
10382 *
10383 * @param pVCpu The cross context virtual CPU structure.
10384 * @param pVmxTransient The VMX-transient structure.
10385 * @param pDbgState The debug state.
10386 */
10387static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10388{
10389 /*
10390 * Ensure desired flags in VMCS control fields are set.
10391 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10392 *
10393 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10394 * there should be no stale data in pCtx at this point.
10395 */
10396 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10397 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10398 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10399 {
10400 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10401 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10402 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10403 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10404 pDbgState->fModifiedProcCtls = true;
10405 }
10406
10407 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10408 {
10409 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10410 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10411 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10412 pDbgState->fModifiedProcCtls2 = true;
10413 }
10414
10415 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10416 {
10417 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10418 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10419 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10420 pDbgState->fModifiedXcptBitmap = true;
10421 }
10422
10423 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10424 {
10425 pVmcsInfo->u64Cr0Mask = 0;
10426 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10427 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10428 }
10429
10430 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10431 {
10432 pVmcsInfo->u64Cr4Mask = 0;
10433 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10434 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10435 }
10436
10437 NOREF(pVCpu);
10438}
10439
10440
10441/**
10442 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10443 * re-entry next time around.
10444 *
10445 * @returns Strict VBox status code (i.e. informational status codes too).
10446 * @param pVCpu The cross context virtual CPU structure.
10447 * @param pVmxTransient The VMX-transient structure.
10448 * @param pDbgState The debug state.
10449 * @param rcStrict The return code from executing the guest using single
10450 * stepping.
10451 */
10452static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10453 VBOXSTRICTRC rcStrict)
10454{
10455 /*
10456 * Restore VM-exit control settings as we may not reenter this function the
10457 * next time around.
10458 */
10459 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10460
10461     /* We reload the initial value and trigger whatever recalculations we can the
10462        next time around.  From the looks of things, that's all that's required atm. */
10463 if (pDbgState->fModifiedProcCtls)
10464 {
10465 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10466 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10467 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10468 AssertRC(rc2);
10469 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10470 }
10471
10472 /* We're currently the only ones messing with this one, so just restore the
10473 cached value and reload the field. */
10474 if ( pDbgState->fModifiedProcCtls2
10475 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10476 {
10477 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10478 AssertRC(rc2);
10479 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10480 }
10481
10482 /* If we've modified the exception bitmap, we restore it and trigger
10483 reloading and partial recalculation the next time around. */
10484 if (pDbgState->fModifiedXcptBitmap)
10485 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10486
10487 return rcStrict;
10488}
10489
10490
10491/**
10492 * Configures VM-exit controls for current DBGF and DTrace settings.
10493 *
10494 * This updates @a pDbgState and the VMCS execution control fields to reflect
10495 * the necessary VM-exits demanded by DBGF and DTrace.
10496 *
10497 * @param pVCpu The cross context virtual CPU structure.
10498 * @param pVmxTransient The VMX-transient structure. May update
10499 * fUpdatedTscOffsettingAndPreemptTimer.
10500 * @param pDbgState The debug state.
10501 */
10502static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10503{
10504#ifndef IN_NEM_DARWIN
10505 /*
10506      * Note down the DTrace settings sequence number so we can spot changes.
10507 */
10508 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10509 ASMCompilerBarrier();
10510#endif
10511
10512 /*
10513 * We'll rebuild most of the middle block of data members (holding the
10514 * current settings) as we go along here, so start by clearing it all.
10515 */
10516 pDbgState->bmXcptExtra = 0;
10517 pDbgState->fCpe1Extra = 0;
10518 pDbgState->fCpe1Unwanted = 0;
10519 pDbgState->fCpe2Extra = 0;
10520 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10521 pDbgState->bmExitsToCheck[i] = 0;
10522
10523 /*
10524 * Software interrupts (INT XXh) - no idea how to trigger these...
10525 */
10526 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10527 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10528 || VBOXVMM_INT_SOFTWARE_ENABLED())
10529 {
10530 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10531 }
10532
10533 /*
10534 * INT3 breakpoints - triggered by #BP exceptions.
10535 */
10536 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10537 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10538
10539 /*
10540 * Exception bitmap and XCPT events+probes.
10541 */
10542 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10543 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10544 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10545
10546 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10547 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10548 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10549 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10550 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10551 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10552 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10553 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10554 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10555 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10556 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10557 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10558 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10559 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10560 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10561 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10562 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10563 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10564
10565 if (pDbgState->bmXcptExtra)
10566 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10567
10568 /*
10569 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10570 *
10571 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
10572 * So, when adding/changing/removing please don't forget to update it.
10573 *
10574      * Some of the macros are picking up local variables to save horizontal space
10575      * (being able to see it all in a table is the lesser evil here).
10576 */
10577#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10578 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10579 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10580#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10581 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10582 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10583 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10584 } else do { } while (0)
10585#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10586 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10587 { \
10588 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10589 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10590 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10591 } else do { } while (0)
10592#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10593 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10594 { \
10595 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10596 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10597 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10598 } else do { } while (0)
10599#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10600 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10601 { \
10602 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10603 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10604 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10605 } else do { } while (0)
10606
10607 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10608 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10609 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10610 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10611 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10612
10613 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10614 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10615 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10616 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10617 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10618 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10619 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10620 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10621 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10622 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10623 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10624 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10625 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10626 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10627 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10628 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10629 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10630 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10631 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10632 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10633 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10634 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10635 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10636 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10637 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10638 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10639 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10640 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10641 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10642 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10643 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10644 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10645 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10646 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10647 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10648 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10649
10650 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10651 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10652 {
10653 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10654 | CPUMCTX_EXTRN_APIC_TPR);
10655 AssertRC(rc);
10656
10657#if 0 /** @todo fix me */
10658 pDbgState->fClearCr0Mask = true;
10659 pDbgState->fClearCr4Mask = true;
10660#endif
10661 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10662 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10663 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10664 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10665 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10666 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10667 require clearing here and in the loop if we start using it. */
10668 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10669 }
10670 else
10671 {
10672 if (pDbgState->fClearCr0Mask)
10673 {
10674 pDbgState->fClearCr0Mask = false;
10675 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10676 }
10677 if (pDbgState->fClearCr4Mask)
10678 {
10679 pDbgState->fClearCr4Mask = false;
10680 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10681 }
10682 }
10683 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10684 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10685
10686 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10687 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10688 {
10689 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10690 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10691 }
10692 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10693 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10694
10695 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10696 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10697 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10698 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10699 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10700 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10701 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10702 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10703#if 0 /** @todo too slow, fix handler. */
10704 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10705#endif
10706 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10707
10708 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10709 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10710 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10711 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10712 {
10713 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10714 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10715 }
10716 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10717 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10718 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10719 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10720
10721 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10722 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10723 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10724 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10725 {
10726 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10727 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10728 }
10729 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10731 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10732 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10733
10734 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10735 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10736 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10737 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10738 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10739 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10740 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10741 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10742 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10743 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10744 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10745 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10746 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10747 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10748 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10749 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10750 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10751 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10752 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10753     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES,            VMX_EXIT_XSAVES);
10754 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10755 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10756
10757#undef IS_EITHER_ENABLED
10758#undef SET_ONLY_XBM_IF_EITHER_EN
10759#undef SET_CPE1_XBM_IF_EITHER_EN
10760#undef SET_CPEU_XBM_IF_EITHER_EN
10761#undef SET_CPE2_XBM_IF_EITHER_EN
10762
10763 /*
10764 * Sanitize the control stuff.
10765 */
10766 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10767 if (pDbgState->fCpe2Extra)
10768 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10769 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10770 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
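    /* If the desired RDTSC-exiting setting changed, remember the new preference and force the
       TSC offsetting and preemption timer to be re-evaluated before the next VM-entry. */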
10771#ifndef IN_NEM_DARWIN
10772 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10773 {
10774 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10775 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10776 }
10777#else
10778 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10779 {
10780 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10781 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10782 }
10783#endif
10784
10785 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10786 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10787 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10788 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10789}
10790
10791
10792/**
10793 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10794 * appropriate.
10795 *
10796 * The caller has checked the VM-exit against the
10797 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
10798 * already, so we don't have to do that either.
10799 *
10800 * @returns Strict VBox status code (i.e. informational status codes too).
10801 * @param pVCpu The cross context virtual CPU structure.
10802 * @param pVmxTransient The VMX-transient structure.
10803 * @param uExitReason The VM-exit reason.
10804 *
10805 * @remarks The name of this function is displayed by dtrace, so keep it short
10806  *          and to the point. No longer than 33 chars, please.
10807 */
10808static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10809{
10810 /*
10811 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10812 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10813 *
10814      * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10815      *       does. When adding/changing/removing events, update both places. Same ordering, please.
10816 *
10817 * Added/removed events must also be reflected in the next section
10818 * where we dispatch dtrace events.
10819 */
10820 bool fDtrace1 = false;
10821 bool fDtrace2 = false;
10822 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10823 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10824 uint32_t uEventArg = 0;
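    /* SET_EXIT() records only the exit event/probe; SET_BOTH() additionally records the
       corresponding instruction event/probe. */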
10825#define SET_EXIT(a_EventSubName) \
10826 do { \
10827 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10828 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10829 } while (0)
10830#define SET_BOTH(a_EventSubName) \
10831 do { \
10832 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10833 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10834 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10835 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10836 } while (0)
10837 switch (uExitReason)
10838 {
10839 case VMX_EXIT_MTF:
10840 return vmxHCExitMtf(pVCpu, pVmxTransient);
10841
10842 case VMX_EXIT_XCPT_OR_NMI:
10843 {
10844 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10845 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10846 {
10847 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10848 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10849 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10850 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10851 {
10852 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10853 {
10854 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
10855 uEventArg = pVmxTransient->uExitIntErrorCode;
10856 }
10857 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10858 switch (enmEvent1)
10859 {
10860 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10861 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10862 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10863 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10864 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10865 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10866 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10867 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10868 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10869 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10870 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10871 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10872 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10873 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10874 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10875 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10876 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10877 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10878 default: break;
10879 }
10880 }
10881 else
10882 AssertFailed();
10883 break;
10884
10885 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10886 uEventArg = idxVector;
10887 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10888 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10889 break;
10890 }
10891 break;
10892 }
10893
10894 case VMX_EXIT_TRIPLE_FAULT:
10895 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10896 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10897 break;
10898 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10899 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10900 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10901 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10902 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10903
10904 /* Instruction specific VM-exits: */
10905 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10906 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10907 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10908 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
10909 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
10910 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
10911 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
10912 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
10913 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
10914 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
10915 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
10916 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
10917 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
10918 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
10919 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
10920 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
10921 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
10922 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
10923 case VMX_EXIT_MOV_CRX:
10924 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10925 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
10926 SET_BOTH(CRX_READ);
10927 else
10928 SET_BOTH(CRX_WRITE);
10929 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10930 break;
10931 case VMX_EXIT_MOV_DRX:
10932 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10933 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
10934 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
10935 SET_BOTH(DRX_READ);
10936 else
10937 SET_BOTH(DRX_WRITE);
10938 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
10939 break;
10940 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
10941 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
10942 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
10943 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
10944 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
10945 case VMX_EXIT_GDTR_IDTR_ACCESS:
10946 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10947 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
10948 {
10949 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
10950 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
10951 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
10952 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
10953 }
10954 break;
10955
10956 case VMX_EXIT_LDTR_TR_ACCESS:
10957 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10958 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
10959 {
10960 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
10961 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
10962 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
10963 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
10964 }
10965 break;
10966
10967 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
10968 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
10969 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
10970 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
10971 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
10972 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
10973 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
10974 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
10975 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
10976 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
10977 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
10978
10979 /* Events that aren't relevant at this point. */
10980 case VMX_EXIT_EXT_INT:
10981 case VMX_EXIT_INT_WINDOW:
10982 case VMX_EXIT_NMI_WINDOW:
10983 case VMX_EXIT_TPR_BELOW_THRESHOLD:
10984 case VMX_EXIT_PREEMPT_TIMER:
10985 case VMX_EXIT_IO_INSTR:
10986 break;
10987
10988 /* Errors and unexpected events. */
10989 case VMX_EXIT_INIT_SIGNAL:
10990 case VMX_EXIT_SIPI:
10991 case VMX_EXIT_IO_SMI:
10992 case VMX_EXIT_SMI:
10993 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
10994 case VMX_EXIT_ERR_MSR_LOAD:
10995 case VMX_EXIT_ERR_MACHINE_CHECK:
10996 case VMX_EXIT_PML_FULL:
10997 case VMX_EXIT_VIRTUALIZED_EOI:
10998 break;
10999
11000 default:
11001 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11002 break;
11003 }
11004#undef SET_BOTH
11005#undef SET_EXIT
11006
11007 /*
11008      * Dtrace tracepoints go first. We do them all here at once so we don't
11009      * have to duplicate the guest-state saving and related code a few dozen times.
11010      * The downside is that we've got to repeat the switch, though this time
11011 * we use enmEvent since the probes are a subset of what DBGF does.
11012 */
11013 if (fDtrace1 || fDtrace2)
11014 {
11015 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11016 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11017 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11018 switch (enmEvent1)
11019 {
11020 /** @todo consider which extra parameters would be helpful for each probe. */
11021 case DBGFEVENT_END: break;
11022 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11023 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11024 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11025 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11026 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11027 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11028 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11029 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11030 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11031 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11032 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11033 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11034 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11035 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11036 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11037 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11038 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11039 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11040 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11041 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11042 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11043 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11044 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11045 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11046 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11047 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11048 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11049 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11050 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11051 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11052 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11053 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11054 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11055 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11056 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11057 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11058 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11059 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11060 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11061 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11062 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11063 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11064 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11065 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11066 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11067 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11068 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11069 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11070 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11071 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11072 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11073 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11074 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11075 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11076 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11077 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11078 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11079 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11080 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11081 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11082 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11083 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11084 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11085 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11086 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11087 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11088 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11089 }
11090 switch (enmEvent2)
11091 {
11092 /** @todo consider which extra parameters would be helpful for each probe. */
11093 case DBGFEVENT_END: break;
11094 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11095 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11096 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11097 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11098 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11099 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11100 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11101 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11102 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11103 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11104 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11105 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11106 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11107 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11108 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11109 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11110 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11111 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11112 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11113 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11114 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11115 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11116 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11117 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11118 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11119 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11120 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11121 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11122 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11123 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11124 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11125 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11126 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11127 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11128 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11129 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11130 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11131 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11132 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11133 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11134 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11135 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11136 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11137 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11138 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11139 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11140 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11141 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11142 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11143 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11144 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11145 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11146 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11147 }
11148 }
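    /* Note: most of the probes above take only pVCpu and pCtx; the CPUID,
       RDMSR/WRMSR and CRx/DRx ones additionally pass the relevant leaf/MSR
       index, the combined EDX:EAX value, or the low byte of uEventArg. */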
11149
11150 /*
11151 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11152 * the DBGF call will do a full check).
11153 *
11154 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11155 * Note! If we have two events, we prioritize the first, i.e. the instruction
11156 * one, in order to avoid event nesting.
11157 */
11158 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11159 if ( enmEvent1 != DBGFEVENT_END
11160 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11161 {
11162 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11163 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11164 if (rcStrict != VINF_SUCCESS)
11165 return rcStrict;
11166 }
11167 else if ( enmEvent2 != DBGFEVENT_END
11168 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11169 {
11170 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11171 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11172 if (rcStrict != VINF_SUCCESS)
11173 return rcStrict;
11174 }
11175
11176 return VINF_SUCCESS;
11177}
11178
11179
11180/**
11181 * Single-stepping VM-exit filtering.
11182 *
11183 * This is preprocessing the VM-exits and deciding whether we've gotten far
11184 * This preprocesses the VM-exit and decides whether we have gotten far
11185 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11186 * handling is performed.
11187 * @returns Strict VBox status code (i.e. informational status codes too).
11188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11189 * @param pVmxTransient The VMX-transient structure.
11190 * @param pDbgState The debug state.
11191 */
11192DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11193{
11194 /*
11195 * Expensive (saves context) generic dtrace VM-exit probe.
11196 */
11197 uint32_t const uExitReason = pVmxTransient->uExitReason;
11198 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11199 { /* more likely */ }
11200 else
11201 {
11202 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11203 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11204 AssertRC(rc);
11205 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11206 }
11207
11208#ifndef IN_NEM_DARWIN
11209 /*
11210 * Check for host NMI, just to get that out of the way.
11211 */
11212 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11213 { /* normally likely */ }
11214 else
11215 {
11216 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11217 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11218 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11219 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11220 }
11221#endif
11222
11223 /*
11224 * Check for single stepping event if we're stepping.
11225 */
11226 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11227 {
11228 switch (uExitReason)
11229 {
11230 case VMX_EXIT_MTF:
11231 return vmxHCExitMtf(pVCpu, pVmxTransient);
11232
11233 /* Various events: */
11234 case VMX_EXIT_XCPT_OR_NMI:
11235 case VMX_EXIT_EXT_INT:
11236 case VMX_EXIT_TRIPLE_FAULT:
11237 case VMX_EXIT_INT_WINDOW:
11238 case VMX_EXIT_NMI_WINDOW:
11239 case VMX_EXIT_TASK_SWITCH:
11240 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11241 case VMX_EXIT_APIC_ACCESS:
11242 case VMX_EXIT_EPT_VIOLATION:
11243 case VMX_EXIT_EPT_MISCONFIG:
11244 case VMX_EXIT_PREEMPT_TIMER:
11245
11246 /* Instruction specific VM-exits: */
11247 case VMX_EXIT_CPUID:
11248 case VMX_EXIT_GETSEC:
11249 case VMX_EXIT_HLT:
11250 case VMX_EXIT_INVD:
11251 case VMX_EXIT_INVLPG:
11252 case VMX_EXIT_RDPMC:
11253 case VMX_EXIT_RDTSC:
11254 case VMX_EXIT_RSM:
11255 case VMX_EXIT_VMCALL:
11256 case VMX_EXIT_VMCLEAR:
11257 case VMX_EXIT_VMLAUNCH:
11258 case VMX_EXIT_VMPTRLD:
11259 case VMX_EXIT_VMPTRST:
11260 case VMX_EXIT_VMREAD:
11261 case VMX_EXIT_VMRESUME:
11262 case VMX_EXIT_VMWRITE:
11263 case VMX_EXIT_VMXOFF:
11264 case VMX_EXIT_VMXON:
11265 case VMX_EXIT_MOV_CRX:
11266 case VMX_EXIT_MOV_DRX:
11267 case VMX_EXIT_IO_INSTR:
11268 case VMX_EXIT_RDMSR:
11269 case VMX_EXIT_WRMSR:
11270 case VMX_EXIT_MWAIT:
11271 case VMX_EXIT_MONITOR:
11272 case VMX_EXIT_PAUSE:
11273 case VMX_EXIT_GDTR_IDTR_ACCESS:
11274 case VMX_EXIT_LDTR_TR_ACCESS:
11275 case VMX_EXIT_INVEPT:
11276 case VMX_EXIT_RDTSCP:
11277 case VMX_EXIT_INVVPID:
11278 case VMX_EXIT_WBINVD:
11279 case VMX_EXIT_XSETBV:
11280 case VMX_EXIT_RDRAND:
11281 case VMX_EXIT_INVPCID:
11282 case VMX_EXIT_VMFUNC:
11283 case VMX_EXIT_RDSEED:
11284 case VMX_EXIT_XSAVES:
11285 case VMX_EXIT_XRSTORS:
11286 {
11287 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11288 AssertRCReturn(rc, rc);
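                /* Progress check: if either RIP or CS changed since the step was
                   started, at least one instruction has been executed and we can
                   report the step. */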
11289 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11290 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11291 return VINF_EM_DBG_STEPPED;
11292 break;
11293 }
11294
11295 /* Errors and unexpected events: */
11296 case VMX_EXIT_INIT_SIGNAL:
11297 case VMX_EXIT_SIPI:
11298 case VMX_EXIT_IO_SMI:
11299 case VMX_EXIT_SMI:
11300 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11301 case VMX_EXIT_ERR_MSR_LOAD:
11302 case VMX_EXIT_ERR_MACHINE_CHECK:
11303 case VMX_EXIT_PML_FULL:
11304 case VMX_EXIT_VIRTUALIZED_EOI:
11305 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11306 break;
11307
11308 default:
11309 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11310 break;
11311 }
11312 }
11313
11314 /*
11315 * Check for debugger event breakpoints and dtrace probes.
11316 */
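    /* bmExitsToCheck is a bitmap indexed directly by the VMX_EXIT_XXX reason;
       the range check below only guards the ASMBitTest against exit reasons
       beyond the end of the bitmap. */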
11317 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11318 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11319 {
11320 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11321 if (rcStrict != VINF_SUCCESS)
11322 return rcStrict;
11323 }
11324
11325 /*
11326 * Normal processing.
11327 */
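    /* With HMVMX_USE_FUNCTION_TABLE the exit reason indexes straight into
       g_aVMExitHandlers; otherwise vmxHCHandleExit does the dispatching. */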
11328#ifdef HMVMX_USE_FUNCTION_TABLE
11329 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11330#else
11331 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11332#endif
11333}
11334
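/*
 * Illustrative sketch only (not part of the template): the strict status codes
 * returned by vmxHCRunDebugHandleExit are presumably consumed by the debug run
 * loop, which would stop looping on anything other than VINF_SUCCESS, e.g.:
 *
 *     VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &VmxDbgState);
 *     if (rcStrict != VINF_SUCCESS)
 *         break;   // hand VINF_EM_DBG_STEPPED and friends back to the caller
 *
 * VmxDbgState is a hypothetical local VMXRUNDBGSTATE here; the real run loop
 * and its debug-state setup live elsewhere in this template.
 */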
11335/** @} */