VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 97248

Last change on this file was in revision 97248, checked in by vboxsync on 2022-10-20

VMM: Nested VMX: bugref:10092 Build fix.

1/* $Id: VMXAllTemplate.cpp.h 97248 2022-10-20 09:46:56Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
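
/**
 * Usage sketch (illustrative only): in strict builds an exit handler can assert
 * that the VMCS fields it is about to consume have in fact been read into the
 * transient structure, typically after a vmxHCReadToTransient call (defined
 * further down). HMVMX_READ_EXIT_QUALIFICATION is the read-mask bit assumed to
 * be provided by the surrounding HM/VMX headers; pVmxTransient comes from the
 * handler's context.
 *
 * @code
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *     Log4Func(("uExitQual=%#RX64\n", pVmxTransient->uExitQual));
 * @endcode
 */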
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always swapped
69 * and restored across the world-switch, and also registers like the EFER MSR
70 * which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
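/**
 * Usage sketch (illustrative only): before switching VMCSes, the code later in
 * this file asserts that the entire guest-CPU subset described by
 * HMVMX_CPUMCTX_EXTRN_ALL has been imported, i.e. that none of those
 * CPUMCTX_EXTRN_XXX bits are still marked external in fExtrn.
 *
 * @code
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 * @endcode
 */
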
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
470 /* 32-bit read-only data fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields. */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
569
570#ifdef HMVMX_USE_FUNCTION_TABLE
571/**
572 * VMX_EXIT dispatch table.
573 */
574static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
575{
576 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
577 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
578 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
579 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
580 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
581 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
582 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
583 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
584 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
585 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
586 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
587 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
588 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
589 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
590 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
591 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
592 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
593 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
594 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
596 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
597 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
598 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
599 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
600 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
601 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
602 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
603 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
604 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
605#else
606 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
607 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
608 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
609 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
610 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
611 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
612 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
613 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
614 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
615#endif
616 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
617 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
618 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
619 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
620 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
621 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
622 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
623 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
624 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
625 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
626 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
627 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
628 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
629 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
630 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
632 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
633 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
634 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
635 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
636 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
637 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
639 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
640#else
641 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
642#endif
643 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
644 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
646 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
647#else
648 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
651 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
652 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
653 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
654 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
655 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
656 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
657 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
658 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
659 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
660 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
661 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
662 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
663 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
664 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
665 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
666};
667#endif /* HMVMX_USE_FUNCTION_TABLE */
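
/**
 * Dispatch sketch (illustrative only): roughly how the table above is meant to
 * be consulted by the exit dispatcher defined later in this file. The
 * uExitReason value is the basic VM-exit reason taken from
 * VMX_VMCS32_RO_EXIT_REASON and stored in the transient structure; the clamp
 * and fallback here are a sketch, not the exact dispatcher logic.
 *
 * @code
 *     uint32_t const uExitReason = pVmxTransient->uExitReason;
 *     if (uExitReason <= VMX_EXIT_MAX)
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 * @endcode
 */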
668
669#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
670static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
671{
672 /* 0 */ "(Not Used)",
673 /* 1 */ "VMCALL executed in VMX root operation.",
674 /* 2 */ "VMCLEAR with invalid physical address.",
675 /* 3 */ "VMCLEAR with VMXON pointer.",
676 /* 4 */ "VMLAUNCH with non-clear VMCS.",
677 /* 5 */ "VMRESUME with non-launched VMCS.",
678 /* 6 */ "VMRESUME after VMXOFF.",
679 /* 7 */ "VM-entry with invalid control fields.",
680 /* 8 */ "VM-entry with invalid host state fields.",
681 /* 9 */ "VMPTRLD with invalid physical address.",
682 /* 10 */ "VMPTRLD with VMXON pointer.",
683 /* 11 */ "VMPTRLD with incorrect revision identifier.",
684 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
685 /* 13 */ "VMWRITE to read-only VMCS component.",
686 /* 14 */ "(Not Used)",
687 /* 15 */ "VMXON executed in VMX root operation.",
688 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
689 /* 17 */ "VM-entry with non-launched executive VMCS.",
690 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
691 /* 19 */ "VMCALL with non-clear VMCS.",
692 /* 20 */ "VMCALL with invalid VM-exit control fields.",
693 /* 21 */ "(Not Used)",
694 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
695 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
696 /* 24 */ "VMCALL with invalid SMM-monitor features.",
697 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
698 /* 26 */ "VM-entry with events blocked by MOV SS.",
699 /* 27 */ "(Not Used)",
700 /* 28 */ "Invalid operand to INVEPT/INVVPID."
701};
702#endif /* VBOX_STRICT && LOG_ENABLED */
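
/**
 * Logging sketch (illustrative only): how the VM-instruction error table above
 * can be indexed safely, clamping to HMVMX_INSTR_ERROR_MAX for values the table
 * does not cover. The uInstrError local is an assumed name for the value read
 * from VMX_VMCS32_RO_VM_INSTR_ERROR.
 *
 * @code
 *     uint32_t const idxError = RT_MIN(uInstrError, HMVMX_INSTR_ERROR_MAX);
 *     Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxError]));
 * @endcode
 */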
703
704
705/**
706 * Gets the CR0 guest/host mask.
707 *
708 * These bits typically do not change through the lifetime of a VM. Any bit set in
709 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
710 * by the guest.
711 *
712 * @returns The CR0 guest/host mask.
713 * @param pVCpu The cross context virtual CPU structure.
714 */
715static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
716{
717 /*
718 * Guest modifications to CR0 bits that VT-x ignores when saving/restoring (CD, ET, NW)
719 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
720 *
721 * Furthermore, modifications to any bits that are reserved/unspecified currently
722 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
723 * when future CPUs specify and use currently reserved/unspecified bits.
724 */
725 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
726 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
727 * and @bugref{6944}. */
728 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
729 return ( X86_CR0_PE
730 | X86_CR0_NE
731 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
732 | X86_CR0_PG
733 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
734}
735
736
737/**
738 * Gets the CR4 guest/host mask.
739 *
740 * These bits typically do not change through the lifetime of a VM. Any bit set in
741 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
742 * by the guest.
743 *
744 * @returns The CR4 guest/host mask.
745 * @param pVCpu The cross context virtual CPU structure.
746 */
747static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
748{
749 /*
750 * We construct a mask of all CR4 bits that the guest can modify without causing
751 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
752 * a VM-exit when the guest attempts to modify them when executing using
753 * hardware-assisted VMX.
754 *
755 * When a feature is not exposed to the guest (and may be present on the host),
756 * we want to intercept guest modifications to the bit so we can emulate proper
757 * behavior (e.g., #GP).
758 *
759 * Furthermore, only modifications to those bits that don't require immediate
760 * emulation are allowed. For example, PCIDE is excluded because its behavior
761 * depends on CR3, which might not always be the guest value while executing
762 * using hardware-assisted VMX.
763 */
764 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
765 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
766#ifdef IN_NEM_DARWIN
767 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
768#endif
769 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
770
771 /*
772 * Paranoia.
773 * Ensure features exposed to the guest are present on the host.
774 */
775 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
776#ifdef IN_NEM_DARWIN
777 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
778#endif
779 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
780
781 uint64_t const fGstMask = X86_CR4_PVI
782 | X86_CR4_TSD
783 | X86_CR4_DE
784 | X86_CR4_MCE
785 | X86_CR4_PCE
786 | X86_CR4_OSXMMEEXCPT
787 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
788#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
789 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
790 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
791#endif
792 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
793 return ~fGstMask;
794}
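
/**
 * Commit sketch (illustrative only): the masks returned by vmxHCGetFixedCr0Mask
 * and vmxHCGetFixedCr4Mask are intended to end up in the CR0/CR4 guest/host
 * mask VMCS fields so that guest writes to host-owned bits cause VM-exits. The
 * VMX_VMCS_WRITE_NW macro is assumed to be supplied by the including code, in
 * line with the VMX_VMCS_WRITE_16/32/64 macros checked at the top of this file.
 *
 * @code
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 *     rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
 *     AssertRC(rc);
 * @endcode
 */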
795
796
797/**
798 * Adds one or more exceptions to the exception bitmap and commits it to the current
799 * VMCS.
800 *
801 * @param pVCpu The cross context virtual CPU structure.
802 * @param pVmxTransient The VMX-transient structure.
803 * @param uXcptMask The exception(s) to add.
804 */
805static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
806{
807 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
808 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
809 if ((uXcptBitmap & uXcptMask) != uXcptMask)
810 {
811 uXcptBitmap |= uXcptMask;
812 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
813 AssertRC(rc);
814 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
815 }
816}
817
818
819/**
820 * Adds an exception to the exception bitmap and commits it to the current VMCS.
821 *
822 * @param pVCpu The cross context virtual CPU structure.
823 * @param pVmxTransient The VMX-transient structure.
824 * @param uXcpt The exception to add.
825 */
826static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
827{
828 Assert(uXcpt <= X86_XCPT_LAST);
829 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
830}
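
/**
 * Usage sketch (illustrative only): adding an intercept for a single exception,
 * e.g. \#GP, using the helpers above; the mask variant can batch several
 * exceptions into one VMCS write. pVCpu and pVmxTransient come from the
 * caller's context.
 *
 * @code
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(X86_XCPT_DE) | RT_BIT_32(X86_XCPT_UD));
 * @endcode
 */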
831
832
833/**
834 * Removes one or more exceptions from the exception bitmap and commits it to the
835 * current VMCS.
836 *
837 * This takes care of not removing the exception intercept if a nested-guest
838 * requires the exception to be intercepted.
839 *
840 * @returns VBox status code.
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to remove.
844 */
845static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if (u32XcptBitmap & uXcptMask)
850 {
851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
852 if (!pVmxTransient->fIsNestedGuest)
853 { /* likely */ }
854 else
855 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
856#endif
857#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
858 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
859 | RT_BIT(X86_XCPT_DE)
860 | RT_BIT(X86_XCPT_NM)
861 | RT_BIT(X86_XCPT_TS)
862 | RT_BIT(X86_XCPT_UD)
863 | RT_BIT(X86_XCPT_NP)
864 | RT_BIT(X86_XCPT_SS)
865 | RT_BIT(X86_XCPT_GP)
866 | RT_BIT(X86_XCPT_PF)
867 | RT_BIT(X86_XCPT_MF));
868#elif defined(HMVMX_ALWAYS_TRAP_PF)
869 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
870#endif
871 if (uXcptMask)
872 {
873 /* Validate we are not removing any essential exception intercepts. */
874#ifndef IN_NEM_DARWIN
875 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
876#else
877 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
878#endif
879 NOREF(pVCpu);
880 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
881 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
882
883 /* Remove it from the exception bitmap. */
884 u32XcptBitmap &= ~uXcptMask;
885
886 /* Commit and update the cache if necessary. */
887 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
888 {
889 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
890 AssertRC(rc);
891 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
892 }
893 }
894 }
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Removes an exception from the exception bitmap and commits it to the current
901 * VMCS.
902 *
903 * @returns VBox status code.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param pVmxTransient The VMX-transient structure.
906 * @param uXcpt The exception to remove.
907 */
908static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
909{
910 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
911}
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
914
915/**
916 * Loads the shadow VMCS specified by the VMCS info. object.
917 *
918 * @returns VBox status code.
919 * @param pVmcsInfo The VMCS info. object.
920 *
921 * @remarks Can be called with interrupts disabled.
922 */
923static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
924{
925 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
926 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
927
928 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
929 if (RT_SUCCESS(rc))
930 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
931 return rc;
932}
933
934
935/**
936 * Clears the shadow VMCS specified by the VMCS info. object.
937 *
938 * @returns VBox status code.
939 * @param pVmcsInfo The VMCS info. object.
940 *
941 * @remarks Can be called with interrupts disabled.
942 */
943static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
944{
945 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
946 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
947
948 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
949 if (RT_SUCCESS(rc))
950 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
951 return rc;
952}
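
/**
 * Pairing sketch (illustrative only): the shadow VMCS is made active with
 * vmxHCLoadShadowVmcs before copying fields to or from it, and is cleared
 * again with vmxHCClearShadowVmcs afterwards so it can be migrated or freed
 * safely. Interrupts are assumed to be disabled by the caller, as noted in the
 * remarks above; pVmcsInfo is the VMCS info. object from the caller's context.
 *
 * @code
 *     int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... VMREAD/VMWRITE the shadow VMCS fields here ...
 *         rc = vmxHCClearShadowVmcs(pVmcsInfo);
 *     }
 * @endcode
 */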
953
954
955/**
956 * Switches from and to the specified VMCSes.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
960 * @param pVmcsInfoTo The VMCS info. object we are switching to.
961 *
962 * @remarks Called with interrupts disabled.
963 */
964static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
965{
966 /*
967 * Clear the VMCS we are switching out if it has not already been cleared.
968 * This will sync any CPU internal data back to the VMCS.
969 */
970 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
971 {
972 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
973 if (RT_SUCCESS(rc))
974 {
975 /*
976 * The shadow VMCS, if any, would not be active at this point since we
977 * would have cleared it while importing the virtual hardware-virtualization
978 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
979 * clear the shadow VMCS here, just assert for safety.
980 */
981 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
982 }
983 else
984 return rc;
985 }
986
987 /*
988 * Clear the VMCS we are switching to if it has not already been cleared.
989 * This will initialize the VMCS launch state to "clear" required for loading it.
990 *
991 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
992 */
993 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
994 {
995 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
996 if (RT_SUCCESS(rc))
997 { /* likely */ }
998 else
999 return rc;
1000 }
1001
1002 /*
1003 * Finally, load the VMCS we are switching to.
1004 */
1005 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1006}
1007
1008
1009/**
1010 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1011 * caller.
1012 *
1013 * @returns VBox status code.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1016 * true) or guest VMCS (pass false).
1017 */
1018static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1019{
1020 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1021 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1022
1023 PVMXVMCSINFO pVmcsInfoFrom;
1024 PVMXVMCSINFO pVmcsInfoTo;
1025 if (fSwitchToNstGstVmcs)
1026 {
1027 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1028 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1029 }
1030 else
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1034 }
1035
1036 /*
1037 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1038 * preemption hook code path acquires the current VMCS.
1039 */
1040 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1041
1042 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1043 if (RT_SUCCESS(rc))
1044 {
1045 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1046 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1047
1048 /*
1049 * If we are switching to a VMCS that was executed on a different host CPU or was
1050 * never executed before, flag that we need to export the host state before executing
1051 * guest/nested-guest code using hardware-assisted VMX.
1052 *
1053 * This could probably be done in a preemptible context since the preemption hook
1054 * will flag the necessary change in host context. However, since preemption is
1055 * already disabled and to avoid making assumptions about host specific code in
1056 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1057 * disabled.
1058 */
1059 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1060 { /* likely */ }
1061 else
1062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1063
1064 ASMSetFlags(fEFlags);
1065
1066 /*
1067 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1068 * flag that we need to update the host MSR values there. Even if we decide in the
1069 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1070 * if its content differs, we would have to update the host MSRs anyway.
1071 */
1072 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1073 }
1074 else
1075 ASMSetFlags(fEFlags);
1076 return rc;
1077}
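
/**
 * Usage sketch (illustrative only): switching to the nested-guest VMCS before
 * running the nested-guest, and back to the guest VMCS afterwards.
 *
 * @code
 *     int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
 *     AssertRCReturn(rc, rc);
 *     // ... execute the nested-guest using hardware-assisted VMX ...
 *     rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
 *     AssertRCReturn(rc, rc);
 * @endcode
 */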
1078
1079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1080#ifdef VBOX_STRICT
1081
1082/**
1083 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1084 * transient structure.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param pVmxTransient The VMX-transient structure.
1088 */
1089DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1090{
1091 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1092 AssertRC(rc);
1093}
1094
1095
1096/**
1097 * Reads the VM-entry exception error code field from the VMCS into
1098 * the VMX transient structure.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param pVmxTransient The VMX-transient structure.
1102 */
1103DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1104{
1105 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1106 AssertRC(rc);
1107}
1108
1109
1110/**
1111 * Reads the VM-entry instruction length field from the VMCS into
1112 * the VMX transient structure.
1113 *
1114 * @param pVCpu The cross context virtual CPU structure.
1115 * @param pVmxTransient The VMX-transient structure.
1116 */
1117DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1118{
1119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1120 AssertRC(rc);
1121}
1122
1123#endif /* VBOX_STRICT */
1124
1125
1126/**
1127 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1128 *
1129 * Don't call this directly unless it's likely that some or all of the fields
1130 * given in @a a_fReadMask have already been read.
1131 *
1132 * @tparam a_fReadMask The fields to read.
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136template<uint32_t const a_fReadMask>
1137static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1138{
1139 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1140 | HMVMX_READ_EXIT_INSTR_LEN
1141 | HMVMX_READ_EXIT_INSTR_INFO
1142 | HMVMX_READ_IDT_VECTORING_INFO
1143 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1144 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1145 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1146 | HMVMX_READ_GUEST_LINEAR_ADDR
1147 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1148 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1149 )) == 0);
1150
1151 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1152 {
1153 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1154
1155 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1156 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1157 {
1158 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1159 AssertRC(rc);
1160 }
1161 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1162 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1163 {
1164 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1165 AssertRC(rc);
1166 }
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1169 {
1170 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1174 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1180 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1198 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1199 {
1200 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1204 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1205 {
1206 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1213 AssertRC(rc);
1214 }
1215
1216 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1217 }
1218}
1219
1220
1221/**
1222 * Reads VMCS fields into the VMXTRANSIENT structure.
1223 *
1224 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1225 * generating an optimized read sequence without any conditionals in
1226 * non-strict builds.
1227 *
1228 * @tparam a_fReadMask The fields to read. One or more of the
1229 * HMVMX_READ_XXX fields ORed together.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 * @param pVmxTransient The VMX-transient structure.
1232 */
1233template<uint32_t const a_fReadMask>
1234DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1235{
1236 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1237 | HMVMX_READ_EXIT_INSTR_LEN
1238 | HMVMX_READ_EXIT_INSTR_INFO
1239 | HMVMX_READ_IDT_VECTORING_INFO
1240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1241 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1242 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1243 | HMVMX_READ_GUEST_LINEAR_ADDR
1244 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1245 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1246 )) == 0);
1247
1248 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1249 {
1250 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1251 {
1252 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1253 AssertRC(rc);
1254 }
1255 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1256 {
1257 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1286 {
1287 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1296 {
1297 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1298 AssertRC(rc);
1299 }
1300
1301 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1302 }
1303 else
1304 {
1305 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1306 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1307 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1308 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1309 }
1310}
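
/**
 * Usage sketch (illustrative only): a typical exit handler declares up front
 * which VMCS fields it needs and reads them in one go, letting the template
 * collapse the reads into a straight-line sequence when nothing has been read
 * yet and fall back to vmxHCReadToTransientSlow otherwise.
 *
 * @code
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *     // pVmxTransient->uExitQual and pVmxTransient->cbExitInstr are now valid.
 * @endcode
 */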
1311
1312
1313#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1314/**
1315 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure.
1318 * @param pVmxTransient The VMX-transient structure.
1319 */
1320static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1321{
1322 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1323 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1324 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1325 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1326 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1329 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1330 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1333 | HMVMX_READ_EXIT_INSTR_LEN
1334 | HMVMX_READ_EXIT_INSTR_INFO
1335 | HMVMX_READ_IDT_VECTORING_INFO
1336 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1337 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1338 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1339 | HMVMX_READ_GUEST_LINEAR_ADDR
1340 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1341}
1342#endif
1343
1344/**
1345 * Verifies that our cached values of the VMCS fields are all consistent with
1346 * what's actually present in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1350 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1351 * VMCS content. HMCPU error-field is
1352 * updated, see VMX_VCI_XXX.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param pVmcsInfo The VMCS info. object.
1355 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1356 */
1357static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1358{
1359 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1360
1361 uint32_t u32Val;
1362 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1363 AssertRC(rc);
1364 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1365 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1366 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1367 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1368
1369 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1372 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1379 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1386 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1391 {
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1395 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398 }
1399
1400 uint64_t u64Val;
1401 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1402 {
1403 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1406 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409 }
1410
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1414 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417
1418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1421 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 NOREF(pcszVmcs);
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1432 * VMCS.
1433 *
1434 * This is typically required when the guest changes paging mode.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 * @param pVmxTransient The VMX-transient structure.
1439 *
1440 * @remarks Requires EFER.
1441 * @remarks No-long-jump zone!!!
1442 */
1443static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1444{
1445 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1446 {
1447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1449
1450 /*
1451 * VM-entry controls.
1452 */
1453 {
1454 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1455 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
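            /* For example, with hypothetical capability values allowed0=0x000011ff and
               allowed1=0x000f3fff: every allowed0 bit must end up set, only allowed1 bits may
               be set, and the "(fVal & fZap) == fVal" check further down fails exactly when a
               bit outside allowed1 would be set. */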
1456
1457 /*
1458 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1459 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1460 *
1461 * For nested-guests, this is a mandatory VM-entry control. It's also
1462 * required because we do not want to leak host bits to the nested-guest.
1463 */
1464 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1465
1466 /*
1467 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1468 *
1469             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1470             * required to get the nested-guest working with hardware-assisted VMX execution.
1471 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1472 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1473 * here rather than while merging the guest VMCS controls.
1474 */
1475 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1476 {
1477 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1478 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1479 }
1480 else
1481 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1482
1483 /*
1484 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1485 *
1486 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1487 * regardless of whether the nested-guest VMCS specifies it because we are free to
1488 * load whatever MSRs we require and we do not need to modify the guest visible copy
1489 * of the VM-entry MSR load area.
1490 */
1491 if ( g_fHmVmxSupportsVmcsEfer
1492#ifndef IN_NEM_DARWIN
1493 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1494#endif
1495 )
1496 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1497 else
1498 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1499
1500 /*
1501 * The following should -not- be set (since we're not in SMM mode):
1502 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1503 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1504 */
1505
1506 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1507 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1508
1509 if ((fVal & fZap) == fVal)
1510 { /* likely */ }
1511 else
1512 {
1513 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1514 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1515 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1516 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1517 }
1518
1519 /* Commit it to the VMCS. */
1520 if (pVmcsInfo->u32EntryCtls != fVal)
1521 {
1522 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1523 AssertRC(rc);
1524 pVmcsInfo->u32EntryCtls = fVal;
1525 }
1526 }
1527
1528 /*
1529 * VM-exit controls.
1530 */
1531 {
1532 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1533 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1534
1535 /*
1536 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1537 * supported the 1-setting of this bit.
1538 *
1539             * For nested-guests, we set "save debug controls" since the converse
1540             * "load debug controls" is mandatory for nested-guests anyway.
1541 */
1542 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1543
1544 /*
1545 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1546 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1547             * host EFER.LMA and EFER.LME bits to this value. See assertion in
1548 * vmxHCExportHostMsrs().
1549 *
1550 * For nested-guests, we always set this bit as we do not support 32-bit
1551 * hosts.
1552 */
1553 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1554
1555#ifndef IN_NEM_DARWIN
1556 /*
1557 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1558 *
1559 * For nested-guests, we should use the "save IA32_EFER" control if we also
1560 * used the "load IA32_EFER" control while exporting VM-entry controls.
1561 */
1562 if ( g_fHmVmxSupportsVmcsEfer
1563 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1564 {
1565 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1566 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1567 }
1568#endif
1569
1570 /*
1571 * Enable saving of the VMX-preemption timer value on VM-exit.
1572 * For nested-guests, currently not exposed/used.
1573 */
1574 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1575 * the timer value. */
1576 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1577 {
1578 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1579 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1580 }
1581
1582 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1583 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1584
1585 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1586 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1587 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1588
1589 if ((fVal & fZap) == fVal)
1590 { /* likely */ }
1591 else
1592 {
1593 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1594 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1595 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1596 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1597 }
1598
1599 /* Commit it to the VMCS. */
1600 if (pVmcsInfo->u32ExitCtls != fVal)
1601 {
1602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1603 AssertRC(rc);
1604 pVmcsInfo->u32ExitCtls = fVal;
1605 }
1606 }
1607
1608 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1609 }
1610 return VINF_SUCCESS;
1611}
1612
1613
1614/**
1615 * Sets the TPR threshold in the VMCS.
1616 *
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param pVmcsInfo The VMCS info. object.
1619 * @param u32TprThreshold The TPR threshold (task-priority class only).
1620 */
1621DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1622{
1623 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1624 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1625 RT_NOREF(pVmcsInfo);
1626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1627 AssertRC(rc);
1628}
1629
1630
1631/**
1632 * Exports the guest APIC TPR state into the VMCS.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure.
1635 * @param pVmxTransient The VMX-transient structure.
1636 *
1637 * @remarks No-long-jump zone!!!
1638 */
1639static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1640{
1641 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1642 {
1643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1644
1645 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1646 if (!pVmxTransient->fIsNestedGuest)
1647 {
1648 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1649 && APICIsEnabled(pVCpu))
1650 {
1651 /*
1652 * Setup TPR shadowing.
1653 */
1654 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1655 {
1656 bool fPendingIntr = false;
1657 uint8_t u8Tpr = 0;
1658 uint8_t u8PendingIntr = 0;
1659 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1660 AssertRC(rc);
1661
1662 /*
1663 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1664 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1665 * priority of the pending interrupt so we can deliver the interrupt. If there
1666 * are no interrupts pending, set threshold to 0 to not cause any
1667 * TPR-below-threshold VM-exits.
1668 */
1669 uint32_t u32TprThreshold = 0;
1670 if (fPendingIntr)
1671 {
1672 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1673 (which is the Task-Priority Class). */
1674 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1675 const uint8_t u8TprPriority = u8Tpr >> 4;
1676 if (u8PendingPriority <= u8TprPriority)
1677 u32TprThreshold = u8PendingPriority;
1678 }
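                    /* Worked example (made-up values): a pending vector of 0x51 is priority
                       class 5 and a guest TPR of 0x80 is class 8; since 5 <= 8 the threshold
                       becomes 5, so a TPR-below-threshold VM-exit fires once the guest lowers
                       its TPR below 0x50 and the interrupt becomes deliverable. */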
1679
1680 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1681 }
1682 }
1683 }
1684 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1685 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1686 }
1687}
1688
1689
1690/**
1691 * Gets the guest interruptibility-state and updates related force-flags.
1692 *
1693 * @returns Guest's interruptibility-state.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 *
1696 * @remarks No-long-jump zone!!!
1697 */
1698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1699{
1700 uint32_t fIntrState;
1701
1702 /*
1703 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1704 */
1705 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1706 fIntrState = 0;
1707 else
1708 {
1709 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1710 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1711
1712 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1714 else
1715 {
1716 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1717
1718 /* Block-by-STI must not be set when interrupts are disabled. */
1719 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1720 }
1721 }
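    /* E.g. a guest STI executed while EFLAGS.IF was clear inhibits interrupts for exactly one
       instruction (block-by-STI), while MOV SS/POP SS yields block-by-MOV-SS; reporting this
       state keeps the CPU from injecting interrupts too early. */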
1722
1723 /*
1724 * Check if we should inhibit NMI delivery.
1725 */
1726 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1727 { /* likely */ }
1728 else
1729 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1730
1731 /*
1732 * Validate.
1733 */
1734    /* We don't support block-by-SMI yet. */
1735 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1736
1737 return fIntrState;
1738}
1739
1740
1741/**
1742 * Exports the exception intercepts required for guest execution in the VMCS.
1743 *
1744 * @param pVCpu The cross context virtual CPU structure.
1745 * @param pVmxTransient The VMX-transient structure.
1746 *
1747 * @remarks No-long-jump zone!!!
1748 */
1749static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1750{
1751 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1752 {
1753 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1754 if ( !pVmxTransient->fIsNestedGuest
1755 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1756 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1757 else
1758 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1759
1760 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1761 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1762 }
1763}
1764
1765
1766/**
1767 * Exports the guest's RIP into the guest-state area in the VMCS.
1768 *
1769 * @param pVCpu The cross context virtual CPU structure.
1770 *
1771 * @remarks No-long-jump zone!!!
1772 */
1773static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1774{
1775 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1776 {
1777 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1778
1779 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1780 AssertRC(rc);
1781
1782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1783 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1784 }
1785}
1786
1787
1788/**
1789 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1790 *
1791 * @param pVCpu The cross context virtual CPU structure.
1792 * @param pVmxTransient The VMX-transient structure.
1793 *
1794 * @remarks No-long-jump zone!!!
1795 */
1796static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1797{
1798 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1799 {
1800 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1801
1802 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1803 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1804           to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1805 Use 32-bit VMWRITE. */
1806 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1807 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1808 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1809
1810#ifndef IN_NEM_DARWIN
1811 /*
1812 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1813 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1814 * can run the real-mode guest code under Virtual 8086 mode.
1815 */
1816 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1817 if (pVmcsInfo->RealMode.fRealOnV86Active)
1818 {
1819 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1820 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1821 Assert(!pVmxTransient->fIsNestedGuest);
1822 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1823 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1824 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1825 }
1826#else
1827 RT_NOREF(pVmxTransient);
1828#endif
1829
1830 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1831 AssertRC(rc);
1832
1833 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1834 Log4Func(("eflags=%#RX32\n", fEFlags));
1835 }
1836}
1837
1838
1839#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1840/**
1841 * Copies the nested-guest VMCS to the shadow VMCS.
1842 *
1843 * @returns VBox status code.
1844 * @param pVCpu The cross context virtual CPU structure.
1845 * @param pVmcsInfo The VMCS info. object.
1846 *
1847 * @remarks No-long-jump zone!!!
1848 */
1849static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1850{
1851 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1852 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1853
1854 /*
1855 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1856 * current VMCS, as we may try saving guest lazy MSRs.
1857 *
1858     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1859     * calling the VMCS import code (which is currently performing the guest MSR reads
1860     * on 64-bit hosts and accessing the auto-load/store MSR area on 32-bit hosts) and
1861     * the rest of the VMX leave-session machinery.
1862 */
1863 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1864
1865 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1866 if (RT_SUCCESS(rc))
1867 {
1868 /*
1869 * Copy all guest read/write VMCS fields.
1870 *
1871 * We don't check for VMWRITE failures here for performance reasons and
1872 * because they are not expected to fail, barring irrecoverable conditions
1873 * like hardware errors.
1874 */
1875 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1876 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1877 {
1878 uint64_t u64Val;
1879 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1880 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1881 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1882 }
1883
1884 /*
1885 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1886 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1887 */
1888 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1889 {
1890 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1891 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1892 {
1893 uint64_t u64Val;
1894 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1895 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1896 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1897 }
1898 }
1899
1900 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1901 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1902 }
1903
1904 ASMSetFlags(fEFlags);
1905 return rc;
1906}
1907
1908
1909/**
1910 * Copies the shadow VMCS to the nested-guest VMCS.
1911 *
1912 * @returns VBox status code.
1913 * @param pVCpu The cross context virtual CPU structure.
1914 * @param pVmcsInfo The VMCS info. object.
1915 *
1916 * @remarks Called with interrupts disabled.
1917 */
1918static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1919{
1920 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1921 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1922 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1923
1924 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1925 if (RT_SUCCESS(rc))
1926 {
1927 /*
1928 * Copy guest read/write fields from the shadow VMCS.
1929 * Guest read-only fields cannot be modified, so no need to copy them.
1930 *
1931 * We don't check for VMREAD failures here for performance reasons and
1932 * because they are not expected to fail, barring irrecoverable conditions
1933 * like hardware errors.
1934 */
1935 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1936 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1937 {
1938 uint64_t u64Val;
1939 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1940 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1941 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1942 }
1943
1944 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1945 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1946 }
1947 return rc;
1948}
1949
1950
1951/**
1952 * Enables VMCS shadowing for the given VMCS info. object.
1953 *
1954 * @param pVCpu The cross context virtual CPU structure.
1955 * @param pVmcsInfo The VMCS info. object.
1956 *
1957 * @remarks No-long-jump zone!!!
1958 */
1959static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1960{
1961 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1962 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1963 {
1964 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1965 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1966 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1967 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1968 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1969 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1970 Log4Func(("Enabled\n"));
1971 }
1972}
1973
1974
1975/**
1976 * Disables VMCS shadowing for the given VMCS info. object.
1977 *
1978 * @param pVCpu The cross context virtual CPU structure.
1979 * @param pVmcsInfo The VMCS info. object.
1980 *
1981 * @remarks No-long-jump zone!!!
1982 */
1983static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1984{
1985 /*
1986 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1987 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1988 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1989 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1990 *
1991 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1992 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
1993 */
1994 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1995 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
1996 {
1997 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
1998 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1999 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2000 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2001 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2002 Log4Func(("Disabled\n"));
2003 }
2004}
2005#endif
2006
2007
2008/**
2009 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2010 *
2011 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2012 * sharing FPU-related CR0 bits between the guest and host.
2013 *
2014 * @returns VBox status code.
2015 * @param pVCpu The cross context virtual CPU structure.
2016 * @param pVmxTransient The VMX-transient structure.
2017 *
2018 * @remarks No-long-jump zone!!!
2019 */
2020static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2021{
2022 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2023 {
2024 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2025 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2026
2027 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2028 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2029 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2030 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2031 else
2032 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2033
2034 if (!pVmxTransient->fIsNestedGuest)
2035 {
2036 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2037 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2038 uint64_t const u64ShadowCr0 = u64GuestCr0;
2039 Assert(!RT_HI_U32(u64GuestCr0));
2040
2041 /*
2042 * Setup VT-x's view of the guest CR0.
2043 */
2044 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2045 if (VM_IS_VMX_NESTED_PAGING(pVM))
2046 {
2047#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2048 if (CPUMIsGuestPagingEnabled(pVCpu))
2049 {
2050 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2051 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2052 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2053 }
2054 else
2055 {
2056 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2057 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2058 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2059 }
2060
2061 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2062 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2063 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2064#endif
2065 }
2066 else
2067 {
2068 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2069 u64GuestCr0 |= X86_CR0_WP;
2070 }
2071
2072 /*
2073 * Guest FPU bits.
2074 *
2075              * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2076              * using CR0.TS.
2077 *
2078              * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2079              * set on the first CPUs to support VT-x, and makes no mention of it with regards to UX in VM-entry checks.
2080 */
2081 u64GuestCr0 |= X86_CR0_NE;
2082
2083 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2084 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
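            /* (With CR0.NE clear the guest expects legacy FERR#/IRQ 13 style x87 error
               reporting instead of #MF, so intercepting #MF lets us emulate that behaviour.) */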
2085
2086 /*
2087 * Update exception intercepts.
2088 */
2089 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2090#ifndef IN_NEM_DARWIN
2091 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2092 {
2093 Assert(PDMVmmDevHeapIsEnabled(pVM));
2094 Assert(pVM->hm.s.vmx.pRealModeTSS);
2095 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2096 }
2097 else
2098#endif
2099 {
2100 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2101 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2102 if (fInterceptMF)
2103 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2104 }
2105
2106             /* Additional intercepts for debugging; define these yourself explicitly. */
2107#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2108 uXcptBitmap |= 0
2109 | RT_BIT(X86_XCPT_BP)
2110 | RT_BIT(X86_XCPT_DE)
2111 | RT_BIT(X86_XCPT_NM)
2112 | RT_BIT(X86_XCPT_TS)
2113 | RT_BIT(X86_XCPT_UD)
2114 | RT_BIT(X86_XCPT_NP)
2115 | RT_BIT(X86_XCPT_SS)
2116 | RT_BIT(X86_XCPT_GP)
2117 | RT_BIT(X86_XCPT_PF)
2118 | RT_BIT(X86_XCPT_MF)
2119 ;
2120#elif defined(HMVMX_ALWAYS_TRAP_PF)
2121 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2122#endif
2123 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2124 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2125 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2126 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2127 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2128
2129 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2130 u64GuestCr0 |= fSetCr0;
2131 u64GuestCr0 &= fZapCr0;
2132 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
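            /* Illustration with typical (not architecturally guaranteed) fixed values
               CR0_FIXED0=0x80000021 (PG|NE|PE) and CR0_FIXED1=0xffffffff: a CR0 of 0x00000031
               (NE|ET|PE) picks up PG and becomes 0x80000031, unless unrestricted guest
               execution removed PE/PG from fSetCr0 above; CD/NW are always cleared so the
               CPU runs with caching enabled. */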
2133
2134 /* Commit the CR0 and related fields to the guest VMCS. */
2135 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2136 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2137 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2138 {
2139 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2140 AssertRC(rc);
2141 }
2142 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2143 {
2144 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2145 AssertRC(rc);
2146 }
2147
2148 /* Update our caches. */
2149 pVmcsInfo->u32ProcCtls = uProcCtls;
2150 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2151
2152 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2153 }
2154 else
2155 {
2156 /*
2157 * With nested-guests, we may have extended the guest/host mask here since we
2158 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2159 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2160 * originally supplied. We must copy those bits from the nested-guest CR0 into
2161 * the nested-guest CR0 read-shadow.
2162 */
2163 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2164 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2165 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2166 Assert(!RT_HI_U32(u64GuestCr0));
2167 Assert(u64GuestCr0 & X86_CR0_NE);
2168
2169 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2170 u64GuestCr0 |= fSetCr0;
2171 u64GuestCr0 &= fZapCr0;
2172 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2173
2174 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2175 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2176 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2177
2178 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2179 }
2180
2181 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2182 }
2183
2184 return VINF_SUCCESS;
2185}
2186
2187
2188/**
2189 * Exports the guest control registers (CR3, CR4) into the guest-state area
2190 * in the VMCS.
2191 *
2192 * @returns VBox strict status code.
2193 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2194 * without unrestricted guest access and the VMMDev is not presently
2195 * mapped (e.g. EFI32).
2196 *
2197 * @param pVCpu The cross context virtual CPU structure.
2198 * @param pVmxTransient The VMX-transient structure.
2199 *
2200 * @remarks No-long-jump zone!!!
2201 */
2202static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2203{
2204 int rc = VINF_SUCCESS;
2205 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2206
2207 /*
2208 * Guest CR2.
2209 * It's always loaded in the assembler code. Nothing to do here.
2210 */
2211
2212 /*
2213 * Guest CR3.
2214 */
2215 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2216 {
2217 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2218
2219 if (VM_IS_VMX_NESTED_PAGING(pVM))
2220 {
2221#ifndef IN_NEM_DARWIN
2222 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2223 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2224
2225 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2226 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2227 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2228 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2229
2230 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2231 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2232 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
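            /* E.g. (made-up address) an EPT PML4 table at host-physical 0x12345000 yields an
               EPTP of 0x12345000 | 6 (WB memory type, bits 2:0) | (3 << 3) (4-level walk,
               bits 5:3) = 0x1234501e. */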
2233
2234 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2235 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2236 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2237 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2238 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2239 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2240 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2241
2242 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2243 AssertRC(rc);
2244#endif
2245
2246 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2247 uint64_t u64GuestCr3 = pCtx->cr3;
2248 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2249 || CPUMIsGuestPagingEnabledEx(pCtx))
2250 {
2251 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2252 if (CPUMIsGuestInPAEModeEx(pCtx))
2253 {
2254 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2256 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2257 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2258 }
2259
2260 /*
2261                  * With nested paging, the guest's view of its CR3 is left unblemished when the
2262                  * guest is using paging, or when we have unrestricted guest execution to handle
2263                  * the guest while it's not using paging.
2264 */
2265 }
2266#ifndef IN_NEM_DARWIN
2267 else
2268 {
2269 /*
2270 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2271 * thinks it accesses physical memory directly, we use our identity-mapped
2272 * page table to map guest-linear to guest-physical addresses. EPT takes care
2273 * of translating it to host-physical addresses.
2274 */
2275 RTGCPHYS GCPhys;
2276 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2277
2278 /* We obtain it here every time as the guest could have relocated this PCI region. */
2279 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2280 if (RT_SUCCESS(rc))
2281 { /* likely */ }
2282 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2283 {
2284 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2285 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2286 }
2287 else
2288 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2289
2290 u64GuestCr3 = GCPhys;
2291 }
2292#endif
2293
2294 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2295 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2296 AssertRC(rc);
2297 }
2298 else
2299 {
2300 Assert(!pVmxTransient->fIsNestedGuest);
2301 /* Non-nested paging case, just use the hypervisor's CR3. */
2302 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2303
2304 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2305 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2306 AssertRC(rc);
2307 }
2308
2309 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2310 }
2311
2312 /*
2313 * Guest CR4.
2314     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2315 */
2316 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2317 {
2318 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2319 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2320
2321 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2322 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2323
2324 /*
2325 * With nested-guests, we may have extended the guest/host mask here (since we
2326 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2327 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2328 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2329 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2330 */
2331 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2332 uint64_t u64GuestCr4 = pCtx->cr4;
2333 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2334 ? pCtx->cr4
2335 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
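        /* Reminder: for bits set in the CR4 guest/host mask the guest reads the read-shadow
           value rather than the real CR4 bit, and a guest write that would change such a bit
           away from its shadow value causes a VM-exit. */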
2336 Assert(!RT_HI_U32(u64GuestCr4));
2337
2338#ifndef IN_NEM_DARWIN
2339 /*
2340 * Setup VT-x's view of the guest CR4.
2341 *
2342 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2343 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2344 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2345 *
2346 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2347 */
2348 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2349 {
2350 Assert(pVM->hm.s.vmx.pRealModeTSS);
2351 Assert(PDMVmmDevHeapIsEnabled(pVM));
2352 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2353 }
2354#endif
2355
2356 if (VM_IS_VMX_NESTED_PAGING(pVM))
2357 {
2358 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2359 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2360 {
2361 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2362 u64GuestCr4 |= X86_CR4_PSE;
2363 /* Our identity mapping is a 32-bit page directory. */
2364 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2365 }
2366 /* else use guest CR4.*/
2367 }
2368 else
2369 {
2370 Assert(!pVmxTransient->fIsNestedGuest);
2371
2372 /*
2373 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2374 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2375 */
2376 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2377 {
2378 case PGMMODE_REAL: /* Real-mode. */
2379 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2380 case PGMMODE_32_BIT: /* 32-bit paging. */
2381 {
2382 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2383 break;
2384 }
2385
2386 case PGMMODE_PAE: /* PAE paging. */
2387 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2388 {
2389 u64GuestCr4 |= X86_CR4_PAE;
2390 break;
2391 }
2392
2393 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2394 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2395 {
2396#ifdef VBOX_WITH_64_BITS_GUESTS
2397 /* For our assumption in vmxHCShouldSwapEferMsr. */
2398 Assert(u64GuestCr4 & X86_CR4_PAE);
2399 break;
2400#endif
2401 }
2402 default:
2403 AssertFailed();
2404 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2405 }
2406 }
2407
2408 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2409 u64GuestCr4 |= fSetCr4;
2410 u64GuestCr4 &= fZapCr4;
2411
2412 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2413 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2414 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2415
2416#ifndef IN_NEM_DARWIN
2417 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2418 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2419 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2420 {
2421 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2422 hmR0VmxUpdateStartVmFunction(pVCpu);
2423 }
2424#endif
2425
2426 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2427
2428 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2429 }
2430 return rc;
2431}
2432
2433
2434#ifdef VBOX_STRICT
2435/**
2436 * Strict function to validate segment registers.
2437 *
2438 * @param pVCpu The cross context virtual CPU structure.
2439 * @param pVmcsInfo The VMCS info. object.
2440 *
2441 * @remarks Will import guest CR0 on strict builds during validation of
2442 * segments.
2443 */
2444static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2445{
2446 /*
2447 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2448 *
2449     * The reason we check for attribute value 0 in this function, and not just the unusable bit, is
2450     * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2451     * unusable bit and doesn't change the guest-context value.
2452 */
2453 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2454 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2455 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2456 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2457 && ( !CPUMIsGuestInRealModeEx(pCtx)
2458 && !CPUMIsGuestInV86ModeEx(pCtx)))
2459 {
2460 /* Protected mode checks */
2461 /* CS */
2462 Assert(pCtx->cs.Attr.n.u1Present);
2463 Assert(!(pCtx->cs.Attr.u & 0xf00));
2464 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2465 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2466 || !(pCtx->cs.Attr.n.u1Granularity));
2467 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2468 || (pCtx->cs.Attr.n.u1Granularity));
2469 /* CS cannot be loaded with NULL in protected mode. */
2470 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2471 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2472 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2473 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2474 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2475 else
2476            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2477 /* SS */
2478 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2479 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2480 if ( !(pCtx->cr0 & X86_CR0_PE)
2481 || pCtx->cs.Attr.n.u4Type == 3)
2482 {
2483 Assert(!pCtx->ss.Attr.n.u2Dpl);
2484 }
2485 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2486 {
2487 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2488 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2489 Assert(pCtx->ss.Attr.n.u1Present);
2490 Assert(!(pCtx->ss.Attr.u & 0xf00));
2491 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2492 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2493 || !(pCtx->ss.Attr.n.u1Granularity));
2494 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2495 || (pCtx->ss.Attr.n.u1Granularity));
2496 }
2497 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2498 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2499 {
2500 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2501 Assert(pCtx->ds.Attr.n.u1Present);
2502 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2503 Assert(!(pCtx->ds.Attr.u & 0xf00));
2504 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2505 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2506 || !(pCtx->ds.Attr.n.u1Granularity));
2507 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2508 || (pCtx->ds.Attr.n.u1Granularity));
2509 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2510 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2511 }
2512 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2513 {
2514 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2515 Assert(pCtx->es.Attr.n.u1Present);
2516 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2517 Assert(!(pCtx->es.Attr.u & 0xf00));
2518 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2519 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2520 || !(pCtx->es.Attr.n.u1Granularity));
2521 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2522 || (pCtx->es.Attr.n.u1Granularity));
2523 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2524 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2525 }
2526 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2527 {
2528 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2529 Assert(pCtx->fs.Attr.n.u1Present);
2530 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2531 Assert(!(pCtx->fs.Attr.u & 0xf00));
2532 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2533 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2534 || !(pCtx->fs.Attr.n.u1Granularity));
2535 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2536 || (pCtx->fs.Attr.n.u1Granularity));
2537 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2538 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2539 }
2540 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2541 {
2542 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2543 Assert(pCtx->gs.Attr.n.u1Present);
2544 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2545 Assert(!(pCtx->gs.Attr.u & 0xf00));
2546 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2547 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2548 || !(pCtx->gs.Attr.n.u1Granularity));
2549 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2550 || (pCtx->gs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2552 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2553 }
2554 /* 64-bit capable CPUs. */
2555 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2556 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2557 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2558 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2559 }
2560 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2561 || ( CPUMIsGuestInRealModeEx(pCtx)
2562 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2563 {
2564 /* Real and v86 mode checks. */
2565        /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2566 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2567#ifndef IN_NEM_DARWIN
2568 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2569 {
2570 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2571 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2572 }
2573 else
2574#endif
2575 {
2576 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2577 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2578 }
2579
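        /* In real and v86 mode every segment base must equal the selector shifted left by 4
           (e.g. CS=0x1234 => base 0x12340), the limit must be 0xffff and the access rights
           must be the fixed 0xf3 value checked below. */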
2580 /* CS */
2581 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2582 Assert(pCtx->cs.u32Limit == 0xffff);
2583 Assert(u32CSAttr == 0xf3);
2584 /* SS */
2585 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2586 Assert(pCtx->ss.u32Limit == 0xffff);
2587 Assert(u32SSAttr == 0xf3);
2588 /* DS */
2589 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2590 Assert(pCtx->ds.u32Limit == 0xffff);
2591 Assert(u32DSAttr == 0xf3);
2592 /* ES */
2593 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2594 Assert(pCtx->es.u32Limit == 0xffff);
2595 Assert(u32ESAttr == 0xf3);
2596 /* FS */
2597 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2598 Assert(pCtx->fs.u32Limit == 0xffff);
2599 Assert(u32FSAttr == 0xf3);
2600 /* GS */
2601 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2602 Assert(pCtx->gs.u32Limit == 0xffff);
2603 Assert(u32GSAttr == 0xf3);
2604 /* 64-bit capable CPUs. */
2605 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2606 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2607 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2608 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2609 }
2610}
2611#endif /* VBOX_STRICT */
2612
2613
2614/**
2615 * Exports a guest segment register into the guest-state area in the VMCS.
2616 *
2617 * @returns VBox status code.
2618 * @param pVCpu The cross context virtual CPU structure.
2619 * @param pVmcsInfo The VMCS info. object.
2620 * @param iSegReg The segment register number (X86_SREG_XXX).
2621 * @param pSelReg Pointer to the segment selector.
2622 *
2623 * @remarks No-long-jump zone!!!
2624 */
2625static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2626{
2627 Assert(iSegReg < X86_SREG_COUNT);
2628
2629 uint32_t u32Access = pSelReg->Attr.u;
2630#ifndef IN_NEM_DARWIN
2631 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2632#endif
2633 {
2634 /*
2635 * The way to differentiate between whether this is really a null selector or was just
2636 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2637 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2638         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2639         * that NULL selectors loaded in protected-mode have their attributes set to 0.
2640 */
2641 if (u32Access)
2642 { }
2643 else
2644 u32Access = X86DESCATTR_UNUSABLE;
2645 }
2646#ifndef IN_NEM_DARWIN
2647 else
2648 {
2649 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2650 u32Access = 0xf3;
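        /* 0xf3 decodes to present (0x80) + DPL 3 (0x60) + code/data descriptor (0x10) +
           type 3 (accessed read/write data), i.e. what VT-x requires for segments while in
           virtual-8086 mode. */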
2651 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2652 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2653 RT_NOREF_PV(pVCpu);
2654 }
2655#else
2656 RT_NOREF(pVmcsInfo);
2657#endif
2658
2659 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2660 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2661              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2662
2663 /*
2664 * Commit it to the VMCS.
2665 */
2666 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2667 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2668 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2669 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2670 return VINF_SUCCESS;
2671}
2672
2673
2674/**
2675 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2676 * area in the VMCS.
2677 *
2678 * @returns VBox status code.
2679 * @param pVCpu The cross context virtual CPU structure.
2680 * @param pVmxTransient The VMX-transient structure.
2681 *
2682 * @remarks Will import guest CR0 on strict builds during validation of
2683 * segments.
2684 * @remarks No-long-jump zone!!!
2685 */
2686static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2687{
2688 int rc = VERR_INTERNAL_ERROR_5;
2689#ifndef IN_NEM_DARWIN
2690 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2691#endif
2692 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2693 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2694#ifndef IN_NEM_DARWIN
2695 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2696#endif
2697
2698 /*
2699 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2700 */
2701 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2702 {
2703 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2704 {
2705 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2706#ifndef IN_NEM_DARWIN
2707 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2708 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2709#endif
2710 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2711 AssertRC(rc);
2712 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2713 }
2714
2715 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2716 {
2717 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2718#ifndef IN_NEM_DARWIN
2719 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2720 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2721#endif
2722 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2723 AssertRC(rc);
2724 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2725 }
2726
2727 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2728 {
2729 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2730#ifndef IN_NEM_DARWIN
2731 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2732 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2733#endif
2734 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2735 AssertRC(rc);
2736 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2737 }
2738
2739 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2740 {
2741 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2742#ifndef IN_NEM_DARWIN
2743 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2744 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2745#endif
2746 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2747 AssertRC(rc);
2748 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2749 }
2750
2751 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2752 {
2753 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2754#ifndef IN_NEM_DARWIN
2755 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2756 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2757#endif
2758 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2759 AssertRC(rc);
2760 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2761 }
2762
2763 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2764 {
2765 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2766#ifndef IN_NEM_DARWIN
2767 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2768 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2769#endif
2770 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2771 AssertRC(rc);
2772 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2773 }
2774
2775#ifdef VBOX_STRICT
2776 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2777#endif
2778 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2779 pCtx->cs.Attr.u));
2780 }
2781
2782 /*
2783 * Guest TR.
2784 */
2785 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2786 {
2787 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2788
2789 /*
2790 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2791 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2792 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2793 */
2794 uint16_t u16Sel;
2795 uint32_t u32Limit;
2796 uint64_t u64Base;
2797 uint32_t u32AccessRights;
2798#ifndef IN_NEM_DARWIN
2799 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2800#endif
2801 {
2802 u16Sel = pCtx->tr.Sel;
2803 u32Limit = pCtx->tr.u32Limit;
2804 u64Base = pCtx->tr.u64Base;
2805 u32AccessRights = pCtx->tr.Attr.u;
2806 }
2807#ifndef IN_NEM_DARWIN
2808 else
2809 {
2810 Assert(!pVmxTransient->fIsNestedGuest);
2811 Assert(pVM->hm.s.vmx.pRealModeTSS);
2812 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2813
2814 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2815 RTGCPHYS GCPhys;
2816 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2817 AssertRCReturn(rc, rc);
2818
2819 X86DESCATTR DescAttr;
2820 DescAttr.u = 0;
2821 DescAttr.n.u1Present = 1;
2822 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2823
2824 u16Sel = 0;
2825 u32Limit = HM_VTX_TSS_SIZE;
2826 u64Base = GCPhys;
2827 u32AccessRights = DescAttr.u;
2828 }
2829#endif
2830
2831 /* Validate. */
2832 Assert(!(u16Sel & RT_BIT(2)));
2833 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2834 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2835 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2836 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2837 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2838 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2839 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2840 Assert( (u32Limit & 0xfff) == 0xfff
2841 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2842 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2843 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2844
2845 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2846 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2847 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2848 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2849
2850 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2851 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2852 }
2853
2854 /*
2855 * Guest GDTR.
2856 */
2857 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2858 {
2859 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2860
2861 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2863
2864 /* Validate. */
2865 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2866
2867 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2868 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2869 }
2870
2871 /*
2872 * Guest LDTR.
2873 */
2874 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2875 {
2876 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2877
2878 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2879 uint32_t u32Access;
2880 if ( !pVmxTransient->fIsNestedGuest
2881 && !pCtx->ldtr.Attr.u)
2882 u32Access = X86DESCATTR_UNUSABLE;
2883 else
2884 u32Access = pCtx->ldtr.Attr.u;
2885
2886 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2887 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2888 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2889 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2890
2891 /* Validate. */
2892 if (!(u32Access & X86DESCATTR_UNUSABLE))
2893 {
2894 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2895 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2896 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2897 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2898 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2899 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2900 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2901 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2902 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2903 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2904 }
2905
2906 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2907 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2908 }
2909
2910 /*
2911 * Guest IDTR.
2912 */
2913 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2914 {
2915 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2916
2917 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2918 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2919
2920 /* Validate. */
2921 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2922
2923 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2924 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2925 }
2926
2927 return VINF_SUCCESS;
2928}
2929
2930
2931/**
2932 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2933 * VM-exit interruption info type.
2934 *
2935 * @returns The IEM exception flags.
2936 * @param uVector The event vector.
2937 * @param uVmxEventType The VMX event type.
2938 *
2939 * @remarks This function currently only constructs flags required for
2940 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2941 * and CR2 aspects of an exception are not included).
2942 */
2943static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2944{
2945 uint32_t fIemXcptFlags;
2946 switch (uVmxEventType)
2947 {
2948 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2949 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2950 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2951 break;
2952
2953 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2954 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2955 break;
2956
2957 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2958 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2959 break;
2960
2961 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2962 {
2963 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2964 if (uVector == X86_XCPT_BP)
2965 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2966 else if (uVector == X86_XCPT_OF)
2967 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2968 else
2969 {
2970 fIemXcptFlags = 0;
2971 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2972 }
2973 break;
2974 }
2975
2976 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2977 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2978 break;
2979
2980 default:
2981 fIemXcptFlags = 0;
2982 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2983 break;
2984 }
2985 return fIemXcptFlags;
2986}
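/*
 * Illustrative only (not part of the original source): given the mapping
 * above, a breakpoint recorded as a software exception would translate as
 * follows, assuming the IEM_XCPT_FLAGS_* and X86_XCPT_* definitions used
 * elsewhere in this file:
 *
 *   uint32_t const fFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
 *   Assert(fFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
 */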
2987
2988
2989/**
2990 * Sets an event as a pending event to be injected into the guest.
2991 *
2992 * @param pVCpu The cross context virtual CPU structure.
2993 * @param u32IntInfo The VM-entry interruption-information field.
2994 * @param cbInstr The VM-entry instruction length in bytes (for
2995 * software interrupts, exceptions and privileged
2996 * software exceptions).
2997 * @param u32ErrCode The VM-entry exception error code.
2998 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
2999 * page-fault.
3000 */
3001DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3002 RTGCUINTPTR GCPtrFaultAddress)
3003{
3004 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3005 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3006 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3007 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3008 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3009 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3010}
3011
3012
3013/**
3014 * Sets an external interrupt as pending-for-injection into the VM.
3015 *
3016 * @param pVCpu The cross context virtual CPU structure.
3017 * @param u8Interrupt The external interrupt vector.
3018 */
3019DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3020{
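    /* Note: the VM-entry and VM-exit interruption-information fields share the
       same vector/type/error-code-valid/valid bit layout, which is presumably
       why the VMX_BF_EXIT_INT_INFO_VECTOR field macro can be mixed with the
       VMX_BF_ENTRY_* ones below without ill effect. */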
3021 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3022 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3023 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3024 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3025 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3026}
3027
3028
3029/**
3030 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 */
3034DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3035{
3036 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3037 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3040 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3041}
3042
3043
3044/**
3045 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3046 *
3047 * @param pVCpu The cross context virtual CPU structure.
3048 */
3049DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3050{
3051 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3052 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
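    /* For reference: with the standard interruption-information layout (vector
       in bits 7:0, type in bits 10:8, error-code-valid in bit 11, valid in
       bit 31) this works out to u32IntInfo = 0x80000b08 for #DF. */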
3055 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3056}
3057
3058
3059/**
3060 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3061 *
3062 * @param pVCpu The cross context virtual CPU structure.
3063 */
3064DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3065{
3066 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3067 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3070 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3071}
3072
3073
3074/**
3075 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3076 *
3077 * @param pVCpu The cross context virtual CPU structure.
3078 */
3079DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3080{
3081 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3082 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3085 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3086}
3087
3088
3089#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3090/**
3091 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 * @param u32ErrCode The error code for the general-protection exception.
3095 */
3096DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3097{
3098 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3102 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3103}
3104
3105
3106/**
3107 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the stack exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3121
3122
3123/**
3124 * Fixes up attributes for the specified segment register.
3125 *
3126 * @param pVCpu The cross context virtual CPU structure.
3127 * @param pSelReg The segment register that needs fixing.
3128 * @param pszRegName The register name (for logging and assertions).
3129 */
3130static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3131{
3132 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3133
3134 /*
3135 * If VT-x marks the segment as unusable, most other bits remain undefined:
3136 * - For CS the L, D and G bits have meaning.
3137 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3138 * - For the remaining data segments no bits are defined.
3139 *
3140 * The present bit and the unusable bit have been observed to be set at the
3141 * same time (the selector was supposed to be invalid as we started executing
3142 * a V8086 interrupt in ring-0).
3143 *
3144 * What is important for the rest of the VBox code is that the P bit is
3145 * cleared. Some of the other VBox code recognizes the unusable bit, but
3146 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3147 * safe side here, we'll strip off P and other bits we don't care about. If
3148 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3149 *
3150 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3151 */
3152#ifdef VBOX_STRICT
3153 uint32_t const uAttr = pSelReg->Attr.u;
3154#endif
3155
3156 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3157 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3158 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3159
3160#ifdef VBOX_STRICT
3161# ifndef IN_NEM_DARWIN
3162 VMMRZCallRing3Disable(pVCpu);
3163# endif
3164 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3165# ifdef DEBUG_bird
3166 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3167 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3168 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3169# endif
3170# ifndef IN_NEM_DARWIN
3171 VMMRZCallRing3Enable(pVCpu);
3172# endif
3173 NOREF(uAttr);
3174#endif
3175 RT_NOREF2(pVCpu, pszRegName);
3176}
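/*
 * Illustrative example (assuming the usual X86DESCATTR_* bit values): an
 * unusable-but-present read/write data segment with Attr.u = 0x10093 would be
 * reduced by the masking above to 0x10013, i.e. the P bit is stripped while
 * the unusable bit and the type/DT/DPL bits survive.
 */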
3177
3178
3179/**
3180 * Imports a guest segment register from the current VMCS into the guest-CPU
3181 * context.
3182 *
3183 * @param pVCpu The cross context virtual CPU structure.
3184 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3185 *
3186 * @remarks Called with interrupts and/or preemption disabled.
3187 */
3188template<uint32_t const a_iSegReg>
3189DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3190{
3191 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3192 /* Check that the macros we depend upon here and in the exporting parent function work: */
3193#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3194 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3195 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3196 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3197 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3198 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3199 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3200 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3201 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3202 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3203 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3204
3205 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3206
3207 uint16_t u16Sel;
3208 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3209 pSelReg->Sel = u16Sel;
3210 pSelReg->ValidSel = u16Sel;
3211
3212 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3213 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3214
3215 uint32_t u32Attr;
3216 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3217 pSelReg->Attr.u = u32Attr;
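    /* Note: the register name passed to the fix-up helper below is picked out
       of a single string with embedded terminators ("ES\0CS\0SS\0DS\0FS\0GS");
       each name is two characters plus a NUL, hence the a_iSegReg * 3 offset. */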
3218 if (u32Attr & X86DESCATTR_UNUSABLE)
3219 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3220
3221 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3222}
3223
3224
3225/**
3226 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3227 *
3228 * @param pVCpu The cross context virtual CPU structure.
3229 *
3230 * @remarks Called with interrupts and/or preemption disabled.
3231 */
3232DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3233{
3234 uint16_t u16Sel;
3235 uint64_t u64Base;
3236 uint32_t u32Limit, u32Attr;
3237 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3238 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3239 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3240 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3241
3242 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3243 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3244 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3245 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3246 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3247 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3248 if (u32Attr & X86DESCATTR_UNUSABLE)
3249 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3250}
3251
3252
3253/**
3254 * Imports the guest TR from the current VMCS into the guest-CPU context.
3255 *
3256 * @param pVCpu The cross context virtual CPU structure.
3257 *
3258 * @remarks Called with interrupts and/or preemption disabled.
3259 */
3260DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3261{
3262 uint16_t u16Sel;
3263 uint64_t u64Base;
3264 uint32_t u32Limit, u32Attr;
3265 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3266 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3267 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3268 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3269
3270 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3271 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3272 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3273 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3274 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3275 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3276 /* TR is the only selector that can never be unusable. */
3277 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3278}
3279
3280
3281/**
3282 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3283 *
3284 * @returns The RIP value.
3285 * @param pVCpu The cross context virtual CPU structure.
3286 *
3287 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3288 * @remarks Do -not- call this function directly!
3289 */
3290DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3291{
3292 uint64_t u64Val;
3293 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3294 AssertRC(rc);
3295
3296 pVCpu->cpum.GstCtx.rip = u64Val;
3297
3298 return u64Val;
3299}
3300
3301
3302/**
3303 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3304 *
3305 * @param pVCpu The cross context virtual CPU structure.
3306 *
3307 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3308 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3309 * instead!!!
3310 */
3311DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3312{
3313 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3314 {
3315 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3316 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3317 }
3318}
3319
3320
3321/**
3322 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3323 *
3324 * @param pVCpu The cross context virtual CPU structure.
3325 * @param pVmcsInfo The VMCS info. object.
3326 *
3327 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3328 * @remarks Do -not- call this function directly!
3329 */
3330DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3331{
3332 uint64_t fRFlags;
3333 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3334 AssertRC(rc);
3335
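    /* RFLAGS bit 1 (X86_EFL_RA1_MASK) is reserved and always reads as one, and
       nothing outside the architecturally live flag bits should ever be set;
       the assertions below sanity-check exactly that. */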
3336 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3337 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3338
3339 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3340#ifndef IN_NEM_DARWIN
3341 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3342 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3343 { /* most likely */ }
3344 else
3345 {
3346 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3347 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3348 }
3349#else
3350 RT_NOREF(pVmcsInfo);
3351#endif
3352}
3353
3354
3355/**
3356 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3357 *
3358 * @param pVCpu The cross context virtual CPU structure.
3359 * @param pVmcsInfo The VMCS info. object.
3360 *
3361 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3362 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3363 * instead!!!
3364 */
3365DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3366{
3367 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3368 {
3369 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3370 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3371 }
3372}
3373
3374
3375/**
3376 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3377 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3378 */
3379DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3380{
3381 /*
3382 * We must import RIP here to set our EM interrupt-inhibited state.
3383 * We also import RFLAGS as our code that evaluates pending interrupts
3384 * before VM-entry requires it.
3385 */
3386 vmxHCImportGuestRip(pVCpu);
3387 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3388
3389 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3390 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3391 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3392 pVCpu->cpum.GstCtx.rip);
3393 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3394}
3395
3396
3397/**
3398 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3399 * context.
3400 *
3401 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3402 *
3403 * @param pVCpu The cross context virtual CPU structure.
3404 * @param pVmcsInfo The VMCS info. object.
3405 *
3406 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3407 * do not log!
3408 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3409 * instead!!!
3410 */
3411DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3412{
3413 uint32_t u32Val;
3414 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3415 if (!u32Val)
3416 {
3417 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3418 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3419 }
3420 else
3421 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3422}
3423
3424
3425/**
3426 * Worker for VMXR0ImportStateOnDemand.
3427 *
3428 * @returns VBox status code.
3429 * @param pVCpu The cross context virtual CPU structure.
3430 * @param pVmcsInfo The VMCS info. object.
3431 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3432 */
3433static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3434{
3435 int rc = VINF_SUCCESS;
3436 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3437 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3438 uint32_t u32Val;
3439
3440 /*
3441 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3442 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3443 * neither are other host platforms.
3444 *
3445 * Committing this temporarily as it prevents BSOD.
3446 *
3447 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3448 */
3449#ifdef RT_OS_WINDOWS
3450 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3451 return VERR_HM_IPE_1;
3452#endif
3453
3454 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3455
3456#ifndef IN_NEM_DARWIN
3457 /*
3458 * We disable interrupts to make the updating of the state and in particular
3459 * the fExtrn modification atomic wrt preemption hooks.
3460 */
3461 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3462#endif
3463
3464 fWhat &= pCtx->fExtrn;
3465 if (fWhat)
3466 {
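        /* Note: the do/while(0) below exists only to provide a 'break' target
           for the error path of the CPUMCTX_EXTRN_HWVIRT import further down. */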
3467 do
3468 {
3469 if (fWhat & CPUMCTX_EXTRN_RIP)
3470 vmxHCImportGuestRip(pVCpu);
3471
3472 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3473 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3474
3475 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3476 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3477 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3478
3479 if (fWhat & CPUMCTX_EXTRN_RSP)
3480 {
3481 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3482 AssertRC(rc);
3483 }
3484
3485 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3486 {
3487 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3488#ifndef IN_NEM_DARWIN
3489 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3490#else
3491 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3492#endif
3493 if (fWhat & CPUMCTX_EXTRN_CS)
3494 {
3495 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3496 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3497 if (fRealOnV86Active)
3498 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3499 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3500 }
3501 if (fWhat & CPUMCTX_EXTRN_SS)
3502 {
3503 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3504 if (fRealOnV86Active)
3505 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3506 }
3507 if (fWhat & CPUMCTX_EXTRN_DS)
3508 {
3509 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3510 if (fRealOnV86Active)
3511 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3512 }
3513 if (fWhat & CPUMCTX_EXTRN_ES)
3514 {
3515 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3516 if (fRealOnV86Active)
3517 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3518 }
3519 if (fWhat & CPUMCTX_EXTRN_FS)
3520 {
3521 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3522 if (fRealOnV86Active)
3523 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3524 }
3525 if (fWhat & CPUMCTX_EXTRN_GS)
3526 {
3527 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3528 if (fRealOnV86Active)
3529 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3530 }
3531 }
3532
3533 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3534 {
3535 if (fWhat & CPUMCTX_EXTRN_LDTR)
3536 vmxHCImportGuestLdtr(pVCpu);
3537
3538 if (fWhat & CPUMCTX_EXTRN_GDTR)
3539 {
3540 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3541 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3542 pCtx->gdtr.cbGdt = u32Val;
3543 }
3544
3545 /* Guest IDTR. */
3546 if (fWhat & CPUMCTX_EXTRN_IDTR)
3547 {
3548 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3549 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3550 pCtx->idtr.cbIdt = u32Val;
3551 }
3552
3553 /* Guest TR. */
3554 if (fWhat & CPUMCTX_EXTRN_TR)
3555 {
3556#ifndef IN_NEM_DARWIN
3557 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3558 so we don't need to import that one. */
3559 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3560#endif
3561 vmxHCImportGuestTr(pVCpu);
3562 }
3563 }
3564
3565 if (fWhat & CPUMCTX_EXTRN_DR7)
3566 {
3567#ifndef IN_NEM_DARWIN
3568 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3569#endif
3570 {
3571 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3572 AssertRC(rc);
3573 }
3574 }
3575
3576 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3577 {
3578 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3579 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3580 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3581 pCtx->SysEnter.cs = u32Val;
3582 }
3583
3584#ifndef IN_NEM_DARWIN
3585 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3586 {
3587 if ( pVM->hmr0.s.fAllow64BitGuests
3588 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3589 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3590 }
3591
3592 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3593 {
3594 if ( pVM->hmr0.s.fAllow64BitGuests
3595 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3596 {
3597 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3598 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3599 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3600 }
3601 }
3602
3603 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3604 {
3605 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3606 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3607 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3608 Assert(pMsrs);
3609 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3610 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3611 for (uint32_t i = 0; i < cMsrs; i++)
3612 {
3613 uint32_t const idMsr = pMsrs[i].u32Msr;
3614 switch (idMsr)
3615 {
3616 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3617 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3618 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3619 default:
3620 {
3621 uint32_t idxLbrMsr;
3622 if (VM_IS_VMX_LBR(pVM))
3623 {
3624 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3625 {
3626 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3627 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3628 break;
3629 }
3630 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3631 {
3632 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3633 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3634 break;
3635 }
3636 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3637 {
3638 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3639 break;
3640 }
3641 /* Fallthru (no break) */
3642 }
3643 pCtx->fExtrn = 0;
3644 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3645 ASMSetFlags(fEFlags);
3646 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3647 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3648 }
3649 }
3650 }
3651 }
3652#endif
3653
3654 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3655 {
3656 if (fWhat & CPUMCTX_EXTRN_CR0)
3657 {
3658 uint64_t u64Cr0;
3659 uint64_t u64Shadow;
3660 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3661 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3662#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3663 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3664 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3665#else
3666 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3667 {
3668 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3669 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3670 }
3671 else
3672 {
3673 /*
3674 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3675 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3676 * re-construct CR0. See @bugref{9180#c95} for details.
3677 */
3678 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3679 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3680 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3681 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3682 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3683 }
3684#endif
3685#ifndef IN_NEM_DARWIN
3686 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3687#endif
3688 CPUMSetGuestCR0(pVCpu, u64Cr0);
3689#ifndef IN_NEM_DARWIN
3690 VMMRZCallRing3Enable(pVCpu);
3691#endif
3692 }
3693
3694 if (fWhat & CPUMCTX_EXTRN_CR4)
3695 {
3696 uint64_t u64Cr4;
3697 uint64_t u64Shadow;
3698 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3699 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3700#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3701 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3702 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3703#else
3704 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3705 {
3706 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3707 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3708 }
3709 else
3710 {
3711 /*
3712 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3713 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3714 * re-construct CR4. See @bugref{9180#c95} for details.
3715 */
3716 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3717 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3718 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3719 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3720 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3721 }
3722#endif
3723 pCtx->cr4 = u64Cr4;
3724 }
3725
3726 if (fWhat & CPUMCTX_EXTRN_CR3)
3727 {
3728 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3729 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3730 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3731 && CPUMIsGuestPagingEnabledEx(pCtx)))
3732 {
3733 uint64_t u64Cr3;
3734 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3735 if (pCtx->cr3 != u64Cr3)
3736 {
3737 pCtx->cr3 = u64Cr3;
3738 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3739 }
3740
3741 /*
3742 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3743 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3744 */
3745 if (CPUMIsGuestInPAEModeEx(pCtx))
3746 {
3747 X86PDPE aPaePdpes[4];
3748 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3749 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3750 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3751 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3752 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3753 {
3754 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3755 /* PGM now updates PAE PDPTEs while updating CR3. */
3756 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3757 }
3758 }
3759 }
3760 }
3761 }
3762
3763#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3764 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3765 {
3766 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3767 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3768 {
3769 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3770 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3771 if (RT_SUCCESS(rc))
3772 { /* likely */ }
3773 else
3774 break;
3775 }
3776 }
3777#endif
3778 } while (0);
3779
3780 if (RT_SUCCESS(rc))
3781 {
3782 /* Update fExtrn. */
3783 pCtx->fExtrn &= ~fWhat;
3784
3785 /* If everything has been imported, clear the HM keeper bit. */
3786 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3787 {
3788#ifndef IN_NEM_DARWIN
3789 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3790#else
3791 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3792#endif
3793 Assert(!pCtx->fExtrn);
3794 }
3795 }
3796 }
3797#ifndef IN_NEM_DARWIN
3798 else
3799 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3800
3801 /*
3802 * Restore interrupts.
3803 */
3804 ASMSetFlags(fEFlags);
3805#endif
3806
3807 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3808
3809 if (RT_SUCCESS(rc))
3810 { /* likely */ }
3811 else
3812 return rc;
3813
3814 /*
3815 * Honor any pending CR3 updates.
3816 *
3817 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3818 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3819 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3820 *
3821 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3822 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3823 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3824 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3825 *
3826 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3827 *
3828 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3829 */
3830 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3831#ifndef IN_NEM_DARWIN
3832 && VMMRZCallRing3IsEnabled(pVCpu)
3833#endif
3834 )
3835 {
3836 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3837 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3838 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3839 }
3840
3841 return VINF_SUCCESS;
3842}
3843
3844
3845/**
3846 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3847 *
3848 * @returns VBox status code.
3849 * @param pVCpu The cross context virtual CPU structure.
3850 * @param pVmcsInfo The VMCS info. object.
3851 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3852 * in NEM/darwin context.
3853 * @tparam a_fWhat What to import, zero or more bits from
3854 * HMVMX_CPUMCTX_EXTRN_ALL.
3855 */
3856template<uint64_t const a_fWhat>
3857static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3858{
3859 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3860 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3861 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3862 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3863
3864 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3865
3866 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3867
3868 /* RIP and RFLAGS may have been imported already by the post exit code
3869 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3870 this block is skipped. */
3871 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3872 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3873 {
3874 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3875 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3876
3877 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3878 {
3879 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3880 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3881 else
3882 vmxHCImportGuestCoreRip(pVCpu);
3883 }
3884 }
3885
3886 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3887 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3888 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3889
3890 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3891 {
3892 if (a_fWhat & CPUMCTX_EXTRN_CS)
3893 {
3894 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3895 /** @todo try to get rid of this carp, it smells and is probably never ever
3896 * used: */
3897 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3898 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3899 {
3900 vmxHCImportGuestCoreRip(pVCpu);
3901 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3902 }
3903 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3904 }
3905 if (a_fWhat & CPUMCTX_EXTRN_SS)
3906 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3907 if (a_fWhat & CPUMCTX_EXTRN_DS)
3908 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3909 if (a_fWhat & CPUMCTX_EXTRN_ES)
3910 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3911 if (a_fWhat & CPUMCTX_EXTRN_FS)
3912 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3913 if (a_fWhat & CPUMCTX_EXTRN_GS)
3914 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3915
3916 /* Guest TR.
3917 Real-mode emulation using virtual-8086 mode has the fake TSS
3918 (pRealModeTSS) in TR, so we don't need to import that one. */
3919#ifndef IN_NEM_DARWIN
3920 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3921 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3922 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3923#else
3924 if (a_fWhat & CPUMCTX_EXTRN_TR)
3925#endif
3926 vmxHCImportGuestTr(pVCpu);
3927
3928#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3929 if (fRealOnV86Active)
3930 {
3931 if (a_fWhat & CPUMCTX_EXTRN_CS)
3932 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3933 if (a_fWhat & CPUMCTX_EXTRN_SS)
3934 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3935 if (a_fWhat & CPUMCTX_EXTRN_DS)
3936 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3937 if (a_fWhat & CPUMCTX_EXTRN_ES)
3938 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3939 if (a_fWhat & CPUMCTX_EXTRN_FS)
3940 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3941 if (a_fWhat & CPUMCTX_EXTRN_GS)
3942 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3943 }
3944#endif
3945 }
3946
3947 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3948 {
3949 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3950 AssertRC(rc);
3951 }
3952
3953 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3954 vmxHCImportGuestLdtr(pVCpu);
3955
3956 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3957 {
3958 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3959 uint32_t u32Val;
3960 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3961 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3962 }
3963
3964 /* Guest IDTR. */
3965 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3966 {
3967 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3968 uint32_t u32Val;
3969 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3970 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
3971 }
3972
3973 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3974 {
3975#ifndef IN_NEM_DARWIN
3976 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3977#endif
3978 {
3979 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3980 AssertRC(rc);
3981 }
3982 }
3983
3984 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3985 {
3986 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3987 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3988 uint32_t u32Val;
3989 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3990 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3991 }
3992
3993#ifndef IN_NEM_DARWIN
3994 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3995 {
3996 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
3997 && pVM->hmr0.s.fAllow64BitGuests)
3998 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3999 }
4000
4001 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4002 {
4003 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4004 && pVM->hmr0.s.fAllow64BitGuests)
4005 {
4006 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4007 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4008 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4009 }
4010 }
4011
4012 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4013 {
4014 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4015 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4016 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4017 Assert(pMsrs);
4018 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4019 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4020 for (uint32_t i = 0; i < cMsrs; i++)
4021 {
4022 uint32_t const idMsr = pMsrs[i].u32Msr;
4023 switch (idMsr)
4024 {
4025 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4026 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4027 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4028 default:
4029 {
4030 uint32_t idxLbrMsr;
4031 if (VM_IS_VMX_LBR(pVM))
4032 {
4033 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4034 {
4035 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4036 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4037 break;
4038 }
4039 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4040 {
4041 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
4042 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4043 break;
4044 }
4045 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4046 {
4047 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4048 break;
4049 }
4050 }
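                        /* Fallthru (no break) - same as in vmxHCImportGuestStateEx above. */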
4051 pVCpu->cpum.GstCtx.fExtrn = 0;
4052 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4053 ASMSetFlags(fEFlags);
4054 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4055 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4056 }
4057 }
4058 }
4059 }
4060#endif
4061
4062 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4063 {
4064 uint64_t u64Cr0;
4065 uint64_t u64Shadow;
4066 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4067 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4068#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4069 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4070 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4071#else
4072 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4073 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4074 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4075 else
4076 {
4077 /*
4078 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4079 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4080 * re-construct CR0. See @bugref{9180#c95} for details.
4081 */
4082 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4083 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4084 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4085 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4086 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4087 }
4088#endif
4089#ifndef IN_NEM_DARWIN
4090 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4091#endif
4092 CPUMSetGuestCR0(pVCpu, u64Cr0);
4093#ifndef IN_NEM_DARWIN
4094 VMMRZCallRing3Enable(pVCpu);
4095#endif
4096 }
4097
4098 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4099 {
4100 uint64_t u64Cr4;
4101 uint64_t u64Shadow;
4102 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4103 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4104#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4105 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4106 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4107#else
4108 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4109 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4110 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4111 else
4112 {
4113 /*
4114 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4115 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4116 * re-construct CR4. See @bugref{9180#c95} for details.
4117 */
4118 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4119 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4120 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4121 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4122 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4123 }
4124#endif
4125 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4126 }
4127
4128 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4129 {
4130 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4131 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4132 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4133 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4134 {
4135 uint64_t u64Cr3;
4136 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4137 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4138 {
4139 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4140 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4141 }
4142
4143 /*
4144 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4145 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4146 */
4147 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4148 {
4149 X86PDPE aPaePdpes[4];
4150 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4151 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4152 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4153 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4154 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4155 {
4156 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4157 /* PGM now updates PAE PDPTEs while updating CR3. */
4158 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4159 }
4160 }
4161 }
4162 }
4163
4164#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4165 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4166 {
4167 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4168 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4169 {
4170 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4171 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4172 AssertRCReturn(rc, rc);
4173 }
4174 }
4175#endif
4176
4177 /* Update fExtrn. */
4178 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4179
4180 /* If everything has been imported, clear the HM keeper bit. */
4181 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4182 {
4183#ifndef IN_NEM_DARWIN
4184 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4185#else
4186 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4187#endif
4188 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4189 }
4190
4191 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4192
4193 /*
4194 * Honor any pending CR3 updates.
4195 *
4196 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4197 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4198 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4199 *
4200 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4201 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4202 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4203 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4204 *
4205 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4206 *
4207 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4208 */
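    /* In other words: return immediately unless a CR3 update is actually
       pending (and, in ring-0, unless ring-3 calls are currently enabled); the
       two arms of the conditional differ only in the RT_LIKELY hint. */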
4209#ifndef IN_NEM_DARWIN
4210 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4211 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4212 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4213 return VINF_SUCCESS;
4214 ASMSetFlags(fEFlags);
4215#else
4216 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4217 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4218 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4219 return VINF_SUCCESS;
4220 RT_NOREF_PV(fEFlags);
4221#endif
4222
4223 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4224 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4225 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4226 return VINF_SUCCESS;
4227}
4228
4229
4230/**
4231 * Internal state fetcher.
4232 *
4233 * @returns VBox status code.
4234 * @param pVCpu The cross context virtual CPU structure.
4235 * @param pVmcsInfo The VMCS info. object.
4236 * @param pszCaller For logging.
4237 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4238 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4239 * already. This is ORed together with @a a_fWhat when
4240 * calculating what needs fetching (just for safety).
4241 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4242 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4243 * already. This is ORed together with @a a_fWhat when
4244 * calculating what needs fetching (just for safety).
4245 */
4246template<uint64_t const a_fWhat,
4247 uint64_t const a_fDoneLocal = 0,
4248 uint64_t const a_fDonePostExit = 0
4249#ifndef IN_NEM_DARWIN
4250 | CPUMCTX_EXTRN_INHIBIT_INT
4251 | CPUMCTX_EXTRN_INHIBIT_NMI
4252# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4253 | HMVMX_CPUMCTX_EXTRN_ALL
4254# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4255 | CPUMCTX_EXTRN_RFLAGS
4256# endif
4257#else /* IN_NEM_DARWIN */
4258 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4259#endif /* IN_NEM_DARWIN */
4260>
4261DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4262{
4263 RT_NOREF_PV(pszCaller);
4264 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4265 {
4266#ifndef IN_NEM_DARWIN
4267 /*
4268 * We disable interrupts to make the updating of the state and in particular
4269 * the fExtrn modification atomic wrt preemption hooks.
4270 */
4271 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4272#else
4273 RTCCUINTREG const fEFlags = 0;
4274#endif
4275
4276 /*
4277 * We combine all three parameters and take the (probably) inlined optimized
4278 * code path for the new things specified in a_fWhat.
4279 *
4280 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4281 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4282 * also take the streamlined path when both of these are cleared in fExtrn
4283 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4284 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4285 */
4286 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4287 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4288 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4289 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4290 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4291 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4292 {
4293 int const rc = vmxHCImportGuestStateInner< a_fWhat
4294 & HMVMX_CPUMCTX_EXTRN_ALL
4295 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4296#ifndef IN_NEM_DARWIN
4297 ASMSetFlags(fEFlags);
4298#endif
4299 return rc;
4300 }
4301
4302#ifndef IN_NEM_DARWIN
4303 ASMSetFlags(fEFlags);
4304#endif
4305
4306 /*
4307 * We shouldn't normally get here, but it may happen when executing
4308 * in the debug run-loops. Typically, everything should already have
4309 * been fetched then. Otherwise call the fallback state import function.
4310 */
4311 if (fWhatToDo == 0)
4312 { /* hope the cause was the debug loop or something similar */ }
4313 else
4314 {
4315 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4316 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4317 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4318 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4319 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4320 }
4321 }
4322 return VINF_SUCCESS;
4323}
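/*
 * Illustrative usage (a sketch, not taken from the original source): a VM-exit
 * handler needing only RIP and RFLAGS would typically instantiate the template
 * along these lines, passing its own name for the fallback-path logging:
 *
 *   int const rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
 *   AssertRCReturn(rc, rc);
 */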
4324
4325
4326/**
4327 * Check per-VM and per-VCPU force flag actions that require us to go back to
4328 * ring-3 for one reason or another.
4329 *
4330 * @returns Strict VBox status code (i.e. informational status codes too)
4331 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4332 * ring-3.
4333 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4334 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4335 * interrupts)
4336 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4337 * all EMTs to be in ring-3.
4338 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4339 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4340 * to the EM loop.
4341 *
4342 * @param pVCpu The cross context virtual CPU structure.
4343 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4344 * @param fStepping Whether we are single-stepping the guest using the
4345 * hypervisor debugger.
4346 *
4347 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4348 * is no longer in VMX non-root mode.
4349 */
4350static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4351{
4352#ifndef IN_NEM_DARWIN
4353 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4354#endif
4355
4356 /*
4357 * Update pending interrupts into the APIC's IRR.
4358 */
4359 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4360 APICUpdatePendingInterrupts(pVCpu);
4361
4362 /*
4363 * Anything pending? Should be more likely than not if we're doing a good job.
4364 */
4365 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4366 if ( !fStepping
4367 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4368 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4369 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4370 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4371 return VINF_SUCCESS;
4372
4373 /* Pending PGM CR3 sync. */
4374 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4375 {
4376 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4377 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4378 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4379 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4380 if (rcStrict != VINF_SUCCESS)
4381 {
4382 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4383 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4384 return rcStrict;
4385 }
4386 }
4387
4388 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4389 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4390 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4391 {
4392 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4393 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4394 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4395 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4396 return rc;
4397 }
4398
4399 /* Pending VM request packets, such as hardware interrupts. */
4400 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4401 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4402 {
4403 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4404 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4405 return VINF_EM_PENDING_REQUEST;
4406 }
4407
4408 /* Pending PGM pool flushes. */
4409 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4410 {
4411 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4412 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4413 return VINF_PGM_POOL_FLUSH_PENDING;
4414 }
4415
4416 /* Pending DMA requests. */
4417 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4418 {
4419 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4420 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4421 return VINF_EM_RAW_TO_R3;
4422 }
4423
4424#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4425 /*
4426 * Pending nested-guest events.
4427 *
4428 * Please note that the priority of these events is specified and important.
4429 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4430 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4431 */
4432 if (fIsNestedGuest)
4433 {
4434 /* Pending nested-guest APIC-write. */
4435 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4436 {
4437 Log4Func(("Pending nested-guest APIC-write\n"));
4438 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4439 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4440 return rcStrict;
4441 }
4442
4443 /* Pending nested-guest monitor-trap flag (MTF). */
4444 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4445 {
4446 Log4Func(("Pending nested-guest MTF\n"));
4447 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4448 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4449 return rcStrict;
4450 }
4451
4452 /* Pending nested-guest VMX-preemption timer expired. */
4453 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4454 {
4455 Log4Func(("Pending nested-guest preempt timer\n"));
4456 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4457 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4458 return rcStrict;
4459 }
4460 }
4461#else
4462 NOREF(fIsNestedGuest);
4463#endif
4464
4465 return VINF_SUCCESS;
4466}
4467
4468
4469/**
4470 * Converts any TRPM trap into a pending HM event. This is typically used when
4471 * entering from ring-3 (not longjmp returns).
4472 *
4473 * @param pVCpu The cross context virtual CPU structure.
4474 */
4475static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4476{
4477 Assert(TRPMHasTrap(pVCpu));
4478 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4479
4480 uint8_t uVector;
4481 TRPMEVENT enmTrpmEvent;
4482 uint32_t uErrCode;
4483 RTGCUINTPTR GCPtrFaultAddress;
4484 uint8_t cbInstr;
4485 bool fIcebp;
4486
4487 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4488 AssertRC(rc);
4489
4490 uint32_t u32IntInfo;
4491 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4492 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
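/* The event uses the VMX interruption-information format: vector in bits 7:0, event type in bits 10:8 and the valid bit in bit 31 (set above). */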
4493
4494 rc = TRPMResetTrap(pVCpu);
4495 AssertRC(rc);
4496 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4497 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4498
4499 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4500}
4501
4502
4503/**
4504 * Converts the pending HM event into a TRPM trap.
4505 *
4506 * @param pVCpu The cross context virtual CPU structure.
4507 */
4508static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4509{
4510 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4511
4512 /* If a trap was already pending, we did something wrong! */
4513 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4514
4515 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4516 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4517 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4518
4519 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4520
4521 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4522 AssertRC(rc);
4523
4524 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4525 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4526
4527 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4528 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4529 else
4530 {
4531 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4532 switch (uVectorType)
4533 {
4534 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4535 TRPMSetTrapDueToIcebp(pVCpu);
4536 RT_FALL_THRU();
4537 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4538 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4539 {
4540 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4541 || ( uVector == X86_XCPT_BP /* INT3 */
4542 || uVector == X86_XCPT_OF /* INTO */
4543 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4544 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4545 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4546 break;
4547 }
4548 }
4549 }
4550
4551 /* We're now done converting the pending event. */
4552 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4553}
4554
4555
4556/**
4557 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4558 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4559 *
4560 * @param pVCpu The cross context virtual CPU structure.
4561 * @param pVmcsInfo The VMCS info. object.
4562 */
4563static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4564{
4565 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4566 {
4567 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4568 {
4569 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4570 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4571 AssertRC(rc);
4572 }
4573 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4574}
4575
4576
4577/**
4578 * Clears the interrupt-window exiting control in the VMCS.
4579 *
4580 * @param pVCpu The cross context virtual CPU structure.
4581 * @param pVmcsInfo The VMCS info. object.
4582 */
4583DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4584{
4585 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4586 {
4587 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4588 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4589 AssertRC(rc);
4590 }
4591}
4592
4593
4594/**
4595 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4596 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4597 *
4598 * @param pVCpu The cross context virtual CPU structure.
4599 * @param pVmcsInfo The VMCS info. object.
4600 */
4601static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4602{
4603 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4604 {
4605 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4606 {
4607 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4608 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4609 AssertRC(rc);
4610 Log4Func(("Setup NMI-window exiting\n"));
4611 }
4612 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4613}
4614
4615
4616/**
4617 * Clears the NMI-window exiting control in the VMCS.
4618 *
4619 * @param pVCpu The cross context virtual CPU structure.
4620 * @param pVmcsInfo The VMCS info. object.
4621 */
4622DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4623{
4624 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4625 {
4626 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4627 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4628 AssertRC(rc);
4629 }
4630}
4631
4632
4633/**
4634 * Injects an event into the guest upon VM-entry by updating the relevant fields
4635 * in the VM-entry area in the VMCS.
4636 *
4637 * @returns Strict VBox status code (i.e. informational status codes too).
4638 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4639 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4640 *
4641 * @param pVCpu The cross context virtual CPU structure.
4642 * @param pVmcsInfo The VMCS info object.
4643 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4644 * @param pEvent The event being injected.
4645 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4646 * will be updated if necessary. This cannot be NULL.
4647 * @param fStepping Whether we're single-stepping guest execution and should
4648 * return VINF_EM_DBG_STEPPED if the event is injected
4649 * directly (registers modified by us, not by hardware on
4650 * VM-entry).
4651 */
4652static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4653 bool fStepping, uint32_t *pfIntrState)
4654{
4655 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4656 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4657 Assert(pfIntrState);
4658
4659#ifdef IN_NEM_DARWIN
4660 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4661#endif
4662
4663 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4664 uint32_t u32IntInfo = pEvent->u64IntInfo;
4665 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4666 uint32_t const cbInstr = pEvent->cbInstr;
4667 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4668 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4669 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4670
4671#ifdef VBOX_STRICT
4672 /*
4673 * Validate the error-code-valid bit for hardware exceptions.
4674 * No error codes for exceptions in real-mode.
4675 *
4676 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4677 */
4678 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4679 && !CPUMIsGuestInRealModeEx(pCtx))
4680 {
4681 switch (uVector)
4682 {
4683 case X86_XCPT_PF:
4684 case X86_XCPT_DF:
4685 case X86_XCPT_TS:
4686 case X86_XCPT_NP:
4687 case X86_XCPT_SS:
4688 case X86_XCPT_GP:
4689 case X86_XCPT_AC:
4690 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4691 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4692 RT_FALL_THRU();
4693 default:
4694 break;
4695 }
4696 }
4697
4698 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4699 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4700 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4701#endif
4702
4703 RT_NOREF(uVector);
4704 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4705 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4706 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4707 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4708 {
4709 Assert(uVector <= X86_XCPT_LAST);
4710 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4711 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4712 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4713 }
4714 else
4715 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4716
4717 /*
4718 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4719 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4720 * interrupt handler in the (real-mode) guest.
4721 *
4722 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4723 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4724 */
4725 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4726 {
4727#ifndef IN_NEM_DARWIN
4728 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4729#endif
4730 {
4731 /*
4732 * For CPUs with unrestricted guest execution enabled and with the guest
4733 * in real-mode, we must not set the deliver-error-code bit.
4734 *
4735 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4736 */
4737 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4738 }
4739#ifndef IN_NEM_DARWIN
4740 else
4741 {
4742 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4743 Assert(PDMVmmDevHeapIsEnabled(pVM));
4744 Assert(pVM->hm.s.vmx.pRealModeTSS);
4745 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4746
4747 /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
4748 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4749 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4750 AssertRCReturn(rc2, rc2);
4751
4752 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4753 size_t const cbIdtEntry = sizeof(X86IDTR16);
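/* For vector N the entry occupies bytes N*4 .. N*4+3 of the IVT; if the last byte lies beyond the IDT limit, the handler is not present. */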
4754 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4755 {
4756 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4757 if (uVector == X86_XCPT_DF)
4758 return VINF_EM_RESET;
4759
4760 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4761 No error codes for exceptions in real-mode. */
4762 if (uVector == X86_XCPT_GP)
4763 {
4764 static HMEVENT const s_EventXcptDf
4765 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4766 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4767 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4768 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4769 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4770 }
4771
4772 /*
4773 * If we're injecting an event with no valid IDT entry, inject a #GP.
4774 * No error codes for exceptions in real-mode.
4775 *
4776 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4777 */
4778 static HMEVENT const s_EventXcptGp
4779 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4780 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4781 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4782 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4783 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4784 }
4785
4786 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4787 uint16_t uGuestIp = pCtx->ip;
4788 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4789 {
4790 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4791 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4792 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4793 }
4794 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4795 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
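/* All other event types (hardware exceptions, external interrupts) push the unmodified IP. */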
4796
4797 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4798 X86IDTR16 IdtEntry;
4799 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4800 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4801 AssertRCReturn(rc2, rc2);
4802
4803 /* Construct the stack frame for the interrupt/exception handler. */
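/* Real-mode event delivery pushes FLAGS, CS and IP, in that order. */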
4804 VBOXSTRICTRC rcStrict;
4805 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u);
4806 if (rcStrict == VINF_SUCCESS)
4807 {
4808 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4809 if (rcStrict == VINF_SUCCESS)
4810 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4811 }
4812
4813 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4814 if (rcStrict == VINF_SUCCESS)
4815 {
4816 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4817 pCtx->rip = IdtEntry.offSel;
4818 pCtx->cs.Sel = IdtEntry.uSel;
4819 pCtx->cs.ValidSel = IdtEntry.uSel;
4820 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
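/* Note: cbIdtEntry is sizeof(X86IDTR16), i.e. 4, so this amounts to the usual real-mode base of selector << 4. */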
4821 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4822 && uVector == X86_XCPT_PF)
4823 pCtx->cr2 = GCPtrFault;
4824
4825 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4826 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4827 | HM_CHANGED_GUEST_RSP);
4828
4829 /*
4830 * If we delivered a hardware exception (other than an NMI) and if there was
4831 * block-by-STI in effect, we should clear it.
4832 */
4833 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4834 {
4835 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4836 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4837 Log4Func(("Clearing inhibition due to STI\n"));
4838 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4839 }
4840
4841 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4842 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4843
4844 /*
4845 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4846 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4847 */
4848 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4849
4850 /*
4851 * If we eventually support nested-guest execution without unrestricted guest execution,
4852 * we should set fInterceptEvents here.
4853 */
4854 Assert(!fIsNestedGuest);
4855
4856 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4857 if (fStepping)
4858 rcStrict = VINF_EM_DBG_STEPPED;
4859 }
4860 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4861 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4862 return rcStrict;
4863 }
4864#else
4865 RT_NOREF(pVmcsInfo);
4866#endif
4867 }
4868
4869 /*
4870 * Validate.
4871 */
4872 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4873 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4874
4875 /*
4876 * Inject the event into the VMCS.
4877 */
4878 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4879 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4880 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
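/* The VM-entry instruction length only matters for software interrupts and software/privileged software exceptions. */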
4881 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4882 AssertRC(rc);
4883
4884 /*
4885 * Update guest CR2 if this is a page-fault.
4886 */
4887 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4888 pCtx->cr2 = GCPtrFault;
4889
4890 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4891 return VINF_SUCCESS;
4892}
4893
4894
4895/**
4896 * Evaluates the event to be delivered to the guest and sets it as the pending
4897 * event.
4898 *
4899 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4900 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4901 * NOT restore these force-flags.
4902 *
4903 * @returns Strict VBox status code (i.e. informational status codes too).
4904 * @param pVCpu The cross context virtual CPU structure.
4905 * @param pVmcsInfo The VMCS information structure.
4906 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4907 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4908 */
4909static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4910{
4911 Assert(pfIntrState);
4912 Assert(!TRPMHasTrap(pVCpu));
4913
4914 /*
4915 * Compute/update guest-interruptibility state related FFs.
4916 * The FFs will be used below while evaluating events to be injected.
4917 */
4918 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4919
4920 /*
4921 * Evaluate if a new event needs to be injected.
4922 * An event that's already pending has already performed all necessary checks.
4923 */
4924 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4925 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4926 {
4927 /** @todo SMI. SMIs take priority over NMIs. */
4928
4929 /*
4930 * NMIs.
4931 * NMIs take priority over external interrupts.
4932 */
4933#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4934 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4935#endif
4936 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4937 {
4938 /*
4939 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4940 *
4941 * For a nested-guest, the FF always indicates the outer guest's ability to
4942 * receive an NMI while the guest-interruptibility state bit depends on whether
4943 * the nested-hypervisor is using virtual-NMIs.
4944 */
4945 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4946 {
4947#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4948 if ( fIsNestedGuest
4949 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4950 return IEMExecVmxVmexitXcptNmi(pVCpu);
4951#endif
4952 vmxHCSetPendingXcptNmi(pVCpu);
4953 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4954 Log4Func(("NMI pending injection\n"));
4955
4956 /* We've injected the NMI, bail. */
4957 return VINF_SUCCESS;
4958 }
4959 if (!fIsNestedGuest)
4960 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4961 }
4962
4963 /*
4964 * External interrupts (PIC/APIC).
4965 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4966 * We cannot request the interrupt from the controller again.
4967 */
4968 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4969 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4970 {
4971 Assert(!DBGFIsStepping(pVCpu));
4972 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4973 AssertRC(rc);
4974
4975 /*
4976 * We must not check EFLAGS directly when executing a nested-guest; use
4977 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4978 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4979 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4980 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4981 *
4982 * See Intel spec. 25.4.1 "Event Blocking".
4983 */
4984 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4985 {
4986#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4987 if ( fIsNestedGuest
4988 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4989 {
4990 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4991 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4992 return rcStrict;
4993 }
4994#endif
4995 uint8_t u8Interrupt;
4996 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4997 if (RT_SUCCESS(rc))
4998 {
4999#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5000 if ( fIsNestedGuest
5001 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5002 {
5003 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5004 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5005 return rcStrict;
5006 }
5007#endif
5008 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5009 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5010 }
5011 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5012 {
5013 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5014
5015 if ( !fIsNestedGuest
5016 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5017 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5018 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
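/* The TPR threshold is the priority class (upper nibble) of the masked vector; a TPR-below-threshold VM-exit will occur once the guest lowers its TPR enough to unmask it. */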
5019
5020 /*
5021 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5022 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5023 * need to re-set this force-flag here.
5024 */
5025 }
5026 else
5027 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5028
5029 /* We've injected the interrupt or taken necessary action, bail. */
5030 return VINF_SUCCESS;
5031 }
5032 if (!fIsNestedGuest)
5033 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5034 }
5035 }
5036 else if (!fIsNestedGuest)
5037 {
5038 /*
5039 * An event is being injected or we are in an interrupt shadow. Check if another event is
5040 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5041 * the pending event.
5042 */
5043 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5044 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5045 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5046 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5047 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5048 }
5049 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5050
5051 return VINF_SUCCESS;
5052}
5053
5054
5055/**
5056 * Injects any pending events into the guest if the guest is in a state to
5057 * receive them.
5058 *
5059 * @returns Strict VBox status code (i.e. informational status codes too).
5060 * @param pVCpu The cross context virtual CPU structure.
5061 * @param pVmcsInfo The VMCS information structure.
5062 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5063 * @param fIntrState The VT-x guest-interruptibility state.
5064 * @param fStepping Whether we are single-stepping the guest using the
5065 * hypervisor debugger and should return
5066 * VINF_EM_DBG_STEPPED if the event was dispatched
5067 * directly.
5068 */
5069static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5070 uint32_t fIntrState, bool fStepping)
5071{
5072 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5073#ifndef IN_NEM_DARWIN
5074 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5075#endif
5076
5077#ifdef VBOX_STRICT
5078 /*
5079 * Verify guest-interruptibility state.
5080 *
5081 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5082 * since injecting an event may modify the interruptibility state and we must thus always
5083 * use fIntrState.
5084 */
5085 {
5086 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5087 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5088 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5089 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5090 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
5091 Assert(!TRPMHasTrap(pVCpu));
5092 NOREF(fBlockMovSS); NOREF(fBlockSti);
5093 }
5094#endif
5095
5096 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5097 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5098 {
5099 /*
5100 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5101 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5102 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5103 *
5104 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5105 */
5106 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5107#ifdef VBOX_STRICT
5108 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5109 {
5110 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5111 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5112 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5113 }
5114 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5115 {
5116 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5117 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5118 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5119 }
5120#endif
5121 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5122 uIntType));
5123
5124 /*
5125 * Inject the event and get any changes to the guest-interruptibility state.
5126 *
5127 * The guest-interruptibility state may need to be updated if we inject the event
5128 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5129 */
5130 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5131 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5132
5133 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5134 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5135 else
5136 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5137 }
5138
5139 /*
5140 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5141 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5142 */
5143 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5144 && !fIsNestedGuest)
5145 {
5146 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5147
5148 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5149 {
5150 /*
5151 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5152 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5153 */
5154 Assert(!DBGFIsStepping(pVCpu));
5155 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5156 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5157 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5158 AssertRC(rc);
5159 }
5160 else
5161 {
5162 /*
5163 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5164 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5165 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5166 * we use MTF, so just make sure it's called before executing guest-code.
5167 */
5168 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5169 }
5170 }
5171 /* else: for nested-guests this is currently handled while merging controls. */
5172
5173 /*
5174 * Finally, update the guest-interruptibility state.
5175 *
5176 * This is required for the real-on-v86 software interrupt injection, for
5177 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5178 */
5179 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5180 AssertRC(rc);
5181
5182 /*
5183 * There's no need to clear the VM-entry interruption-information field here if we're not
5184 * injecting anything. VT-x clears the valid bit on every VM-exit.
5185 *
5186 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5187 */
5188
5189 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5190 return rcStrict;
5191}
5192
5193
5194/**
5195 * Tries to determine what part of the guest-state VT-x has deemed invalid
5196 * and updates the error record fields accordingly.
5197 *
5198 * @returns VMX_IGS_* error codes.
5199 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5200 * wrong with the guest state.
5201 *
5202 * @param pVCpu The cross context virtual CPU structure.
5203 * @param pVmcsInfo The VMCS info. object.
5204 *
5205 * @remarks This function assumes our cache of the VMCS controls
5206 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5207 */
5208static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5209{
5210#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5211#define HMVMX_CHECK_BREAK(expr, err) do { \
5212 if (!(expr)) { uError = (err); break; } \
5213 } while (0)
5214
5215 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5216 uint32_t uError = VMX_IGS_ERROR;
5217 uint32_t u32IntrState = 0;
5218#ifndef IN_NEM_DARWIN
5219 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5220 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5221#else
5222 bool const fUnrestrictedGuest = true;
5223#endif
5224 do
5225 {
5226 int rc;
5227
5228 /*
5229 * Guest-interruptibility state.
5230 *
5231 * Read this first so that any check that fails prior to those that actually
5232 * require the guest-interruptibility state would still reflect the correct
5233 * VMCS value and avoid causing further confusion.
5234 */
5235 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5236 AssertRC(rc);
5237
5238 uint32_t u32Val;
5239 uint64_t u64Val;
5240
5241 /*
5242 * CR0.
5243 */
5244 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5245 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5246 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
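/* CR0 bits set in both fixed MSRs must be 1 in the guest, bits clear in both must be 0; see the VMX fixed-bit appendices of the Intel spec. */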
5247 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5248 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5249 if (fUnrestrictedGuest)
5250 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5251
5252 uint64_t u64GuestCr0;
5253 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5254 AssertRC(rc);
5255 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5256 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5257 if ( !fUnrestrictedGuest
5258 && (u64GuestCr0 & X86_CR0_PG)
5259 && !(u64GuestCr0 & X86_CR0_PE))
5260 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5261
5262 /*
5263 * CR4.
5264 */
5265 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5266 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5267 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5268
5269 uint64_t u64GuestCr4;
5270 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5271 AssertRC(rc);
5272 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5273 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5274
5275 /*
5276 * IA32_DEBUGCTL MSR.
5277 */
5278 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5279 AssertRC(rc);
5280 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5281 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5282 {
5283 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5284 }
5285 uint64_t u64DebugCtlMsr = u64Val;
5286
5287#ifdef VBOX_STRICT
5288 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5289 AssertRC(rc);
5290 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5291#endif
5292 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5293
5294 /*
5295 * RIP and RFLAGS.
5296 */
5297 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5298 AssertRC(rc);
5299 /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5300 if ( !fLongModeGuest
5301 || !pCtx->cs.Attr.n.u1Long)
5302 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5303 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5304 * must be identical if the "IA-32e mode guest" VM-entry
5305 * control is 1 and CS.L is 1. No check applies if the
5306 * CPU supports 64 linear-address bits. */
5307
5308 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5309 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5310 AssertRC(rc);
5311 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
5312 VMX_IGS_RFLAGS_RESERVED);
5313 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5314 uint32_t const u32Eflags = u64Val;
5315
5316 if ( fLongModeGuest
5317 || ( fUnrestrictedGuest
5318 && !(u64GuestCr0 & X86_CR0_PE)))
5319 {
5320 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5321 }
5322
5323 uint32_t u32EntryInfo;
5324 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5325 AssertRC(rc);
5326 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5327 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5328
5329 /*
5330 * 64-bit checks.
5331 */
5332 if (fLongModeGuest)
5333 {
5334 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5335 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5336 }
5337
5338 if ( !fLongModeGuest
5339 && (u64GuestCr4 & X86_CR4_PCIDE))
5340 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5341
5342 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5343 * 51:32 beyond the processor's physical-address width are 0. */
5344
5345 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5346 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5347 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5348
5349#ifndef IN_NEM_DARWIN
5350 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5351 AssertRC(rc);
5352 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5353
5354 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5355 AssertRC(rc);
5356 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5357#endif
5358
5359 /*
5360 * PERF_GLOBAL MSR.
5361 */
5362 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5363 {
5364 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5365 AssertRC(rc);
5366 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5367 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5368 }
5369
5370 /*
5371 * PAT MSR.
5372 */
5373 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5374 {
5375 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5376 AssertRC(rc);
5377 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry are reserved. */
5378 for (unsigned i = 0; i < 8; i++)
5379 {
5380 uint8_t u8Val = (u64Val & 0xff);
5381 if ( u8Val != 0 /* UC */
5382 && u8Val != 1 /* WC */
5383 && u8Val != 4 /* WT */
5384 && u8Val != 5 /* WP */
5385 && u8Val != 6 /* WB */
5386 && u8Val != 7 /* UC- */)
5387 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5388 u64Val >>= 8;
5389 }
5390 }
5391
5392 /*
5393 * EFER MSR.
5394 */
5395 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5396 {
5397 Assert(g_fHmVmxSupportsVmcsEfer);
5398 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5399 AssertRC(rc);
5400 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5401 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5402 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5403 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5404 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5405 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5406 * iemVmxVmentryCheckGuestState(). */
5407 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5408 || !(u64GuestCr0 & X86_CR0_PG)
5409 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5410 VMX_IGS_EFER_LMA_LME_MISMATCH);
5411 }
5412
5413 /*
5414 * Segment registers.
5415 */
5416 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5417 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5418 if (!(u32Eflags & X86_EFL_VM))
5419 {
5420 /* CS */
5421 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5422 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5423 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5424 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5425 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5426 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5427 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5428 /* CS cannot be loaded with NULL in protected mode. */
5429 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5430 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5431 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5432 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5433 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5434 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5435 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5436 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5437 else
5438 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5439
5440 /* SS */
5441 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5442 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5443 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5444 if ( !(pCtx->cr0 & X86_CR0_PE)
5445 || pCtx->cs.Attr.n.u4Type == 3)
5446 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5447
5448 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5449 {
5450 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5451 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5452 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5453 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5454 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5455 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5456 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5457 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5458 }
5459
5460 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5461 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5462 {
5463 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5464 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5465 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5466 || pCtx->ds.Attr.n.u4Type > 11
5467 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5468 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5469 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5470 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5471 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5472 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5473 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5474 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5475 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5476 }
5477 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5478 {
5479 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5480 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5481 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5482 || pCtx->es.Attr.n.u4Type > 11
5483 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5484 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5485 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5486 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5487 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5488 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5489 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5490 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5491 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5492 }
5493 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5494 {
5495 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5496 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5497 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5498 || pCtx->fs.Attr.n.u4Type > 11
5499 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5500 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5501 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5502 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5503 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5504 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5505 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5506 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5507 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5508 }
5509 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5510 {
5511 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5512 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5513 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5514 || pCtx->gs.Attr.n.u4Type > 11
5515 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5516 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5517 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5518 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5519 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5520 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5521 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5522 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5523 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5524 }
5525 /* 64-bit capable CPUs. */
5526 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5527 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5528 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5529 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5530 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5531 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5532 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5533 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5534 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5535 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5536 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5537 }
5538 else
5539 {
5540 /* V86 mode checks. */
5541 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5542 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5543 {
5544 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5545 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5546 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5547 }
5548 else
5549 {
5550 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5551 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5552 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5553 }
5554
5555 /* CS */
5556 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5557 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5558 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5559 /* SS */
5560 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5561 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5562 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5563 /* DS */
5564 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5565 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5566 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5567 /* ES */
5568 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5569 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5570 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5571 /* FS */
5572 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5573 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5574 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5575 /* GS */
5576 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5577 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5578 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5579 /* 64-bit capable CPUs. */
5580 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5581 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5582 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5583 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5584 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5585 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5586 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5587 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5588 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5589 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5590 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5591 }
5592
5593 /*
5594 * TR.
5595 */
5596 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5597 /* 64-bit capable CPUs. */
5598 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5599 if (fLongModeGuest)
5600 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5601 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5602 else
5603 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5604 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5605 VMX_IGS_TR_ATTR_TYPE_INVALID);
5606 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5607 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5608 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5609 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5610 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5611 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5612 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5613 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5614
5615 /*
5616 * GDTR and IDTR (64-bit capable checks).
5617 */
5618 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5619 AssertRC(rc);
5620 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5621
5622 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5623 AssertRC(rc);
5624 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5625
5626 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5627 AssertRC(rc);
5628 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5629
5630 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5631 AssertRC(rc);
5632 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5633
5634 /*
5635 * Guest Non-Register State.
5636 */
5637 /* Activity State. */
5638 uint32_t u32ActivityState;
5639 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5640 AssertRC(rc);
5641 HMVMX_CHECK_BREAK( !u32ActivityState
5642 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5643 VMX_IGS_ACTIVITY_STATE_INVALID);
5644 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5645 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5646
5647 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5648 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5649 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5650
5651 /** @todo Activity state and injecting interrupts. Left as a todo since we
5652 * currently don't use any activity state other than ACTIVE. */
5653
5654 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5655 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5656
5657 /* Guest interruptibility-state. */
5658 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5659 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5660 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5661 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5662 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5663 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5664 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5665 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5666 {
5667 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5668 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5669 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5670 }
5671 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5672 {
5673 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5674 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5675 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5676 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5677 }
5678 /** @todo Assumes the processor is not in SMM. */
5679 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5680 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5681 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5682 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5683 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5684 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5685 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5686 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5687
5688 /* Pending debug exceptions. */
5689 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5690 AssertRC(rc);
5691 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5692 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5693 u32Val = u64Val; /* For pending debug exceptions checks below. */
5694
5695 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5696 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5697 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5698 {
5699 if ( (u32Eflags & X86_EFL_TF)
5700 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5701 {
5702 /* Bit 14 is PendingDebug.BS. */
5703 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5704 }
5705 if ( !(u32Eflags & X86_EFL_TF)
5706 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5707 {
5708 /* Bit 14 is PendingDebug.BS. */
5709 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5710 }
5711 }
5712
5713#ifndef IN_NEM_DARWIN
5714 /* VMCS link pointer. */
5715 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5716 AssertRC(rc);
5717 if (u64Val != UINT64_C(0xffffffffffffffff))
5718 {
5719 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5720 /** @todo Bits beyond the processor's physical-address width MBZ. */
5721 /** @todo SMM checks. */
5722 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5723 Assert(pVmcsInfo->pvShadowVmcs);
5724 VMXVMCSREVID VmcsRevId;
5725 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5726 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5727 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5728 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5729 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5730 }
5731
5732     /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when the guest is
5733      *        not using nested paging? */
5734 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5735 && !fLongModeGuest
5736 && CPUMIsGuestInPAEModeEx(pCtx))
5737 {
5738 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5739 AssertRC(rc);
5740 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5741
5742 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5743 AssertRC(rc);
5744 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5745
5746 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5747 AssertRC(rc);
5748 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5749
5750 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5751 AssertRC(rc);
5752 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5753 }
5754#endif
5755
5756 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5757 if (uError == VMX_IGS_ERROR)
5758 uError = VMX_IGS_REASON_NOT_FOUND;
5759 } while (0);
5760
5761 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5762 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5763 return uError;
5764
5765#undef HMVMX_ERROR_BREAK
5766#undef HMVMX_CHECK_BREAK
5767}
5768
5769
5770#ifndef HMVMX_USE_FUNCTION_TABLE
5771/**
5772 * Handles a guest VM-exit from hardware-assisted VMX execution.
5773 *
5774 * @returns Strict VBox status code (i.e. informational status codes too).
5775 * @param pVCpu The cross context virtual CPU structure.
5776 * @param pVmxTransient The VMX-transient structure.
5777 */
5778DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5779{
5780#ifdef DEBUG_ramshankar
5781# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5782 do { \
5783 if (a_fSave != 0) \
5784 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5785 VBOXSTRICTRC rcStrict = a_CallExpr; \
5786 if (a_fSave != 0) \
5787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5788 return rcStrict; \
5789 } while (0)
5790#else
5791# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5792#endif
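    /* Note: in the DEBUG_ramshankar variant above, the entire guest state is imported before each
       handler and everything is flagged as changed afterwards, trading performance for easier
       debugging. */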
5793 uint32_t const uExitReason = pVmxTransient->uExitReason;
5794 switch (uExitReason)
5795 {
5796 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5797 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5798 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5799 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5800 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5801 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5802 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5803 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5804 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5805 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5806 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5807 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5808 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5809 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5810 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5811 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5812 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5813 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5814 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5815 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5816 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5817 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5818 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5819 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5820 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5821 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5822 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5823 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5824 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5825 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5826#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5827 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5828 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5829 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5830 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5831 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5832         case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5833         case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5834 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5835 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5836 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5837#else
5838 case VMX_EXIT_VMCLEAR:
5839 case VMX_EXIT_VMLAUNCH:
5840 case VMX_EXIT_VMPTRLD:
5841 case VMX_EXIT_VMPTRST:
5842 case VMX_EXIT_VMREAD:
5843 case VMX_EXIT_VMRESUME:
5844 case VMX_EXIT_VMWRITE:
5845 case VMX_EXIT_VMXOFF:
5846 case VMX_EXIT_VMXON:
5847 case VMX_EXIT_INVVPID:
5848 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5849#endif
5850#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5851 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5852#else
5853 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5854#endif
5855
5856 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5857 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5858 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5859
5860 case VMX_EXIT_INIT_SIGNAL:
5861 case VMX_EXIT_SIPI:
5862 case VMX_EXIT_IO_SMI:
5863 case VMX_EXIT_SMI:
5864 case VMX_EXIT_ERR_MSR_LOAD:
5865 case VMX_EXIT_ERR_MACHINE_CHECK:
5866 case VMX_EXIT_PML_FULL:
5867 case VMX_EXIT_VIRTUALIZED_EOI:
5868 case VMX_EXIT_GDTR_IDTR_ACCESS:
5869 case VMX_EXIT_LDTR_TR_ACCESS:
5870 case VMX_EXIT_APIC_WRITE:
5871 case VMX_EXIT_RDRAND:
5872 case VMX_EXIT_RSM:
5873 case VMX_EXIT_VMFUNC:
5874 case VMX_EXIT_ENCLS:
5875 case VMX_EXIT_RDSEED:
5876 case VMX_EXIT_XSAVES:
5877 case VMX_EXIT_XRSTORS:
5878 case VMX_EXIT_UMWAIT:
5879 case VMX_EXIT_TPAUSE:
5880 case VMX_EXIT_LOADIWKEY:
5881 default:
5882 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5883 }
5884#undef VMEXIT_CALL_RET
5885}
5886#endif /* !HMVMX_USE_FUNCTION_TABLE */
5887
5888
5889#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5890/**
5891 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5892 *
5893 * @returns Strict VBox status code (i.e. informational status codes too).
5894 * @param pVCpu The cross context virtual CPU structure.
5895 * @param pVmxTransient The VMX-transient structure.
5896 */
5897DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5898{
5899 uint32_t const uExitReason = pVmxTransient->uExitReason;
5900 switch (uExitReason)
5901 {
5902# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5903 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5904 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5905# else
5906 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5907 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5908# endif
5909 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5910 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5911 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5912
5913 /*
5914 * We shouldn't direct host physical interrupts to the nested-guest.
5915 */
5916 case VMX_EXIT_EXT_INT:
5917 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5918
5919 /*
5920          * Instructions that cause VM-exits unconditionally or whose exit condition is
5921          * always taken solely from the nested hypervisor (meaning if the VM-exit
5922          * happens, it's guaranteed to be a nested-guest VM-exit).
5923 *
5924 * - Provides VM-exit instruction length ONLY.
5925 */
5926 case VMX_EXIT_CPUID: /* Unconditional. */
5927 case VMX_EXIT_VMCALL:
5928 case VMX_EXIT_GETSEC:
5929 case VMX_EXIT_INVD:
5930 case VMX_EXIT_XSETBV:
5931 case VMX_EXIT_VMLAUNCH:
5932 case VMX_EXIT_VMRESUME:
5933 case VMX_EXIT_VMXOFF:
5934 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5935 case VMX_EXIT_VMFUNC:
5936 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5937
5938          * Instructions that cause VM-exits unconditionally or whose exit condition is
5939          * always taken solely from the nested hypervisor (meaning if the VM-exit
5940          * happens, it's guaranteed to be a nested-guest VM-exit).
5941 * happens, it's guaranteed to be a nested-guest VM-exit).
5942 *
5943 * - Provides VM-exit instruction length.
5944 * - Provides VM-exit information.
5945 * - Optionally provides Exit qualification.
5946 *
5947 * Since Exit qualification is 0 for all VM-exits where it is not
5948 * applicable, reading and passing it to the guest should produce
5949 * defined behavior.
5950 *
5951 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5952 */
5953 case VMX_EXIT_INVEPT: /* Unconditional. */
5954 case VMX_EXIT_INVVPID:
5955 case VMX_EXIT_VMCLEAR:
5956 case VMX_EXIT_VMPTRLD:
5957 case VMX_EXIT_VMPTRST:
5958 case VMX_EXIT_VMXON:
5959 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5960 case VMX_EXIT_LDTR_TR_ACCESS:
5961 case VMX_EXIT_RDRAND:
5962 case VMX_EXIT_RDSEED:
5963 case VMX_EXIT_XSAVES:
5964 case VMX_EXIT_XRSTORS:
5965 case VMX_EXIT_UMWAIT:
5966 case VMX_EXIT_TPAUSE:
5967 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5968
5969 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5970 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5971 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5972 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5973 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5974 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5975 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5976 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5977 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5978 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5979 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5980 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5981 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5982 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5983 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5984 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5985 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5986 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5987 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5988
5989 case VMX_EXIT_PREEMPT_TIMER:
5990 {
5991 /** @todo NSTVMX: Preempt timer. */
5992 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5993 }
5994
5995 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5996 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5997
5998 case VMX_EXIT_VMREAD:
5999 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
6000
6001 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
6002 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
6003
6004 case VMX_EXIT_INIT_SIGNAL:
6005 case VMX_EXIT_SIPI:
6006 case VMX_EXIT_IO_SMI:
6007 case VMX_EXIT_SMI:
6008 case VMX_EXIT_ERR_MSR_LOAD:
6009 case VMX_EXIT_ERR_MACHINE_CHECK:
6010 case VMX_EXIT_PML_FULL:
6011 case VMX_EXIT_RSM:
6012 default:
6013 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6014 }
6015}
6016#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6017
6018
6019/** @name VM-exit helpers.
6020 * @{
6021 */
6022/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6023/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6024/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6025
6026/** Macro for VM-exits called unexpectedly. */
6027#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6028 do { \
6029 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6030 return VERR_VMX_UNEXPECTED_EXIT; \
6031 } while (0)
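/* Typical (hypothetical) usage from an exit handler:
       HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason); */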
6032
6033#ifdef VBOX_STRICT
6034# ifndef IN_NEM_DARWIN
6035 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6036# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6037 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6038
6039# define HMVMX_ASSERT_PREEMPT_CPUID() \
6040 do { \
6041 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6042 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6043 } while (0)
6044
6045# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6046 do { \
6047 AssertPtr((a_pVCpu)); \
6048 AssertPtr((a_pVmxTransient)); \
6049 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6050 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6051 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6052 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6053 Assert((a_pVmxTransient)->pVmcsInfo); \
6054 Assert(ASMIntAreEnabled()); \
6055 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6056 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6057 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6058 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6059 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6060 HMVMX_ASSERT_PREEMPT_CPUID(); \
6061 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6062 } while (0)
6063# else
6064# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6065# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6066# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6067 do { \
6068 AssertPtr((a_pVCpu)); \
6069 AssertPtr((a_pVmxTransient)); \
6070 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6071 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6072 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6073 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6074 Assert((a_pVmxTransient)->pVmcsInfo); \
6075 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6076 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6077 } while (0)
6078# endif
6079
6080# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6081 do { \
6082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6083 Assert((a_pVmxTransient)->fIsNestedGuest); \
6084 } while (0)
6085
6086# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6087 do { \
6088 Log4Func(("\n")); \
6089 } while (0)
6090#else
6091# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6092 do { \
6093 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6094 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6095 } while (0)
6096
6097# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6098 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6099
6100# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6101#endif
6102
6103#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6104 /** Macro that performs the necessary privilege checks for VM-exits caused by a
6105  *  guest attempting to execute a VMX instruction, raising an exception if needed. */
6106# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6107 do \
6108 { \
6109 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6110 if (rcStrictTmp == VINF_SUCCESS) \
6111 { /* likely */ } \
6112 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6113 { \
6114 Assert((a_pVCpu)->hm.s.Event.fPending); \
6115 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6116 return VINF_SUCCESS; \
6117 } \
6118 else \
6119 { \
6120 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6121 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6122 } \
6123 } while (0)
6124
6125 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6126# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6127 do \
6128 { \
6129 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6130 (a_pGCPtrEffAddr)); \
6131 if (rcStrictTmp == VINF_SUCCESS) \
6132 { /* likely */ } \
6133 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6134 { \
6135 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6136 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6137 NOREF(uXcptTmp); \
6138 return VINF_SUCCESS; \
6139 } \
6140 else \
6141 { \
6142 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6143 return rcStrictTmp; \
6144 } \
6145 } while (0)
6146#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6147
6148
6149/**
6150 * Advances the guest RIP by the specified number of bytes.
6151 *
6152 * @param pVCpu The cross context virtual CPU structure.
6153 * @param cbInstr Number of bytes to advance the RIP by.
6154 *
6155 * @remarks No-long-jump zone!!!
6156 */
6157DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6158{
6159 /* Advance the RIP. */
6160 pVCpu->cpum.GstCtx.rip += cbInstr;
6161 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
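    /* Completing an instruction also clears any STI/MOV SS interrupt shadow. */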
6162 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
6163 /** @todo clear RF? */
6164}
6165
6166
6167/**
6168 * Advances the guest RIP after reading it from the VMCS.
6169 *
6170 * @returns VBox status code, no informational status codes.
6171 * @param pVCpu The cross context virtual CPU structure.
6172 * @param pVmxTransient The VMX-transient structure.
6173 *
6174 * @remarks No-long-jump zone!!!
6175 */
6176static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6177{
6178 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6179 /** @todo consider template here after checking callers. */
6180 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6181 AssertRCReturn(rc, rc);
6182
6183 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6184 return VINF_SUCCESS;
6185}
6186
6187
6188/**
6189  * Handles a condition that occurred while delivering an event through the guest or
6190 * nested-guest IDT.
6191 *
6192 * @returns Strict VBox status code (i.e. informational status codes too).
6193 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6194 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6195  *         to continue execution of the guest which will deliver the \#DF.
6196 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6197 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6198 *
6199 * @param pVCpu The cross context virtual CPU structure.
6200 * @param pVmxTransient The VMX-transient structure.
6201 *
6202 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6203 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6204 * is due to an EPT violation, PML full or SPP-related event.
6205 *
6206 * @remarks No-long-jump zone!!!
6207 */
6208static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6209{
6210 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6211 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6212 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6213 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6214 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6215 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6216
6217 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6218 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6219 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6220 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6221 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6222 {
6223 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6224 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6225
6226 /*
6227      * If the event was a software interrupt (generated with INT n) or a software exception
6228      * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6229      * can handle the VM-exit and continue guest execution, which will re-execute the
6230      * instruction rather than re-inject the exception. Re-injecting can cause premature
6231      * trips to ring-3 before injection and involves TRPM, which currently has no way of
6232      * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6233      * the problem).
6234 */
6235 IEMXCPTRAISE enmRaise;
6236 IEMXCPTRAISEINFO fRaiseInfo;
6237 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6238 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6239 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6240 {
6241 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6242 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6243 }
6244 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6245 {
6246 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6247 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6248 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6249
6250 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6251 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6252
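            /* Let IEM decide how the two events interact: re-deliver the previous event, deliver
               the current one, re-execute the instruction, raise a #DF or signal a triple fault. */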
6253 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6254
6255 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6256 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6257 {
6258 pVmxTransient->fVectoringPF = true;
6259 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6260 }
6261 }
6262 else
6263 {
6264 /*
6265 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6266 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6267 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6268 */
6269 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6270 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6271 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6272 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6273 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6274 }
6275
6276 /*
6277 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6278 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6279 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6280 * subsequent VM-entry would fail, see @bugref{7445}.
6281 *
6282 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6283 */
6284 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6285 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6286 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6287 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6288 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6289
6290 switch (enmRaise)
6291 {
6292 case IEMXCPTRAISE_CURRENT_XCPT:
6293 {
6294 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6295 Assert(rcStrict == VINF_SUCCESS);
6296 break;
6297 }
6298
6299 case IEMXCPTRAISE_PREV_EVENT:
6300 {
6301 uint32_t u32ErrCode;
6302 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6303 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6304 else
6305 u32ErrCode = 0;
6306
6307 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6308 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6309 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6310 pVCpu->cpum.GstCtx.cr2);
6311
6312 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6313 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6314 Assert(rcStrict == VINF_SUCCESS);
6315 break;
6316 }
6317
6318 case IEMXCPTRAISE_REEXEC_INSTR:
6319 Assert(rcStrict == VINF_SUCCESS);
6320 break;
6321
6322 case IEMXCPTRAISE_DOUBLE_FAULT:
6323 {
6324 /*
6325                  * second #PF as a guest #PF (and not a shadow #PF) that needs to be converted into a #DF.
6326 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6327 */
6328 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6329 {
6330 pVmxTransient->fVectoringDoublePF = true;
6331 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6332 pVCpu->cpum.GstCtx.cr2));
6333 rcStrict = VINF_SUCCESS;
6334 }
6335 else
6336 {
6337 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6338 vmxHCSetPendingXcptDF(pVCpu);
6339 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6340 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6341 rcStrict = VINF_HM_DOUBLE_FAULT;
6342 }
6343 break;
6344 }
6345
6346 case IEMXCPTRAISE_TRIPLE_FAULT:
6347 {
6348 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6349 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6350 rcStrict = VINF_EM_RESET;
6351 break;
6352 }
6353
6354 case IEMXCPTRAISE_CPU_HANG:
6355 {
6356 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6357 rcStrict = VERR_EM_GUEST_CPU_HANG;
6358 break;
6359 }
6360
6361 default:
6362 {
6363 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6364 rcStrict = VERR_VMX_IPE_2;
6365 break;
6366 }
6367 }
6368 }
6369 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6370 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6371 {
6372 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6373 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6374 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6375 {
6376 /*
6377              * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6378 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6379 * that virtual NMIs remain blocked until the IRET execution is completed.
6380 *
6381 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6382 */
6383 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6384 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6385 }
6386 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6387 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6388 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6389 {
6390 /*
6391 * Execution of IRET caused an EPT violation, page-modification log-full event or
6392 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6393 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6394 * that virtual NMIs remain blocked until the IRET execution is completed.
6395 *
6396 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6397 */
6398 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6399 {
6400 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6401 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6402 }
6403 }
6404 }
6405
6406 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6407 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6408 return rcStrict;
6409}
6410
6411
6412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6413/**
6414 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6415  * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
6416 *
6417 * @returns Strict VBox status code (i.e. informational status codes too).
6418 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6419 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6420 *
6421 * @param pVCpu The cross context virtual CPU structure.
6422 * @param uExitReason The VM-exit reason.
6423 *
6424 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6425 * @remarks No-long-jump zone!!!
6426 */
6427static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6428{
6429 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6430 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6431
6432 /*
6433 * The physical CPU would have already checked the CPU mode/code segment.
6434 * We shall just assert here for paranoia.
6435 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6436 */
6437 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6438 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6439 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6440
6441 if (uExitReason == VMX_EXIT_VMXON)
6442 {
6443 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6444
6445 /*
6446          * We check CR4.VMXE because it is required to always be set while in VMX operation
6447          * by physical CPUs. Our CR4 read-shadow is only consulted when executing specific
6448          * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6449          * otherwise (i.e. the physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6450 */
6451 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6452 {
6453 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6454 vmxHCSetPendingXcptUD(pVCpu);
6455 return VINF_HM_PENDING_XCPT;
6456 }
6457 }
6458 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6459 {
6460 /*
6461          * (other than VMXON), so we need to raise a #UD.
6462 * (other than VMXON), we need to raise a #UD.
6463 */
6464 Log4Func(("Not in VMX root mode -> #UD\n"));
6465 vmxHCSetPendingXcptUD(pVCpu);
6466 return VINF_HM_PENDING_XCPT;
6467 }
6468
6469 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6470 return VINF_SUCCESS;
6471}
6472
6473
6474/**
6475 * Decodes the memory operand of an instruction that caused a VM-exit.
6476 *
6477 * The Exit qualification field provides the displacement field for memory
6478 * operand instructions, if any.
6479 *
6480 * @returns Strict VBox status code (i.e. informational status codes too).
6481 * @retval VINF_SUCCESS if the operand was successfully decoded.
6482 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6483 * operand.
6484 * @param pVCpu The cross context virtual CPU structure.
6485 * @param uExitInstrInfo The VM-exit instruction information field.
6486 * @param enmMemAccess The memory operand's access type (read or write).
6487 * @param GCPtrDisp The instruction displacement field, if any. For
6488 * RIP-relative addressing pass RIP + displacement here.
6489 * @param pGCPtrMem Where to store the effective destination memory address.
6490 *
6491 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6492  *          virtual-8086 mode and hence skips those checks while verifying if the
6493 * segment is valid.
6494 */
6495static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6496 PRTGCPTR pGCPtrMem)
6497{
6498 Assert(pGCPtrMem);
6499 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6500 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6501 | CPUMCTX_EXTRN_CR0);
6502
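    /* Both tables are indexed by the instruction-info address-size encoding (0=16-bit, 1=32-bit, 2=64-bit). */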
6503 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6504 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6505 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6506
6507 VMXEXITINSTRINFO ExitInstrInfo;
6508 ExitInstrInfo.u = uExitInstrInfo;
6509 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6510 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6511 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6512 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6513 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6514 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6515 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6516 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6517 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6518
6519 /*
6520 * Validate instruction information.
6521      * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6522 */
6523 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6524 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6525 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6526 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6527 AssertLogRelMsgReturn(fIsMemOperand,
6528 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6529
6530 /*
6531 * Compute the complete effective address.
6532 *
6533 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6534 * See AMD spec. 4.5.2 "Segment Registers".
6535 */
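    /* Hypothetical example: for a VMPTRLD with operand [rbx + rsi*8 + 0x10], GCPtrDisp carries
       the displacement (0x10), the base (rbx) and scaled index (rsi*8) are added below, after
       which the segment base (when applicable) and the address-size mask are applied. */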
6536 RTGCPTR GCPtrMem = GCPtrDisp;
6537 if (fBaseRegValid)
6538 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6539 if (fIdxRegValid)
6540 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6541
6542 RTGCPTR const GCPtrOff = GCPtrMem;
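    /* The segment base is applied for all segments outside long mode; in long mode only FS and GS
       have an effective (non-zero) base. */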
6543 if ( !fIsLongMode
6544 || iSegReg >= X86_SREG_FS)
6545 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6546 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6547
6548 /*
6549 * Validate effective address.
6550 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6551 */
6552 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6553 Assert(cbAccess > 0);
6554 if (fIsLongMode)
6555 {
6556 if (X86_IS_CANONICAL(GCPtrMem))
6557 {
6558 *pGCPtrMem = GCPtrMem;
6559 return VINF_SUCCESS;
6560 }
6561
6562 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6563 * "Data Limit Checks in 64-bit Mode". */
6564 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6565 vmxHCSetPendingXcptGP(pVCpu, 0);
6566 return VINF_HM_PENDING_XCPT;
6567 }
6568
6569 /*
6570 * This is a watered down version of iemMemApplySegment().
6571 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6572 * and segment CPL/DPL checks are skipped.
6573 */
6574 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6575 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6576 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6577
6578 /* Check if the segment is present and usable. */
6579 if ( pSel->Attr.n.u1Present
6580 && !pSel->Attr.n.u1Unusable)
6581 {
6582 Assert(pSel->Attr.n.u1DescType);
6583 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6584 {
6585 /* Check permissions for the data segment. */
6586 if ( enmMemAccess == VMXMEMACCESS_WRITE
6587 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6588 {
6589 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6590 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6591 return VINF_HM_PENDING_XCPT;
6592 }
6593
6594 /* Check limits if it's a normal data segment. */
6595 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6596 {
6597 if ( GCPtrFirst32 > pSel->u32Limit
6598 || GCPtrLast32 > pSel->u32Limit)
6599 {
6600 Log4Func(("Data segment limit exceeded. "
6601 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6602 GCPtrLast32, pSel->u32Limit));
6603 if (iSegReg == X86_SREG_SS)
6604 vmxHCSetPendingXcptSS(pVCpu, 0);
6605 else
6606 vmxHCSetPendingXcptGP(pVCpu, 0);
6607 return VINF_HM_PENDING_XCPT;
6608 }
6609 }
6610 else
6611 {
6612 /* Check limits if it's an expand-down data segment.
6613 Note! The upper boundary is defined by the B bit, not the G bit! */
6614 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6615 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6616 {
6617 Log4Func(("Expand-down data segment limit exceeded. "
6618 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6619 GCPtrLast32, pSel->u32Limit));
6620 if (iSegReg == X86_SREG_SS)
6621 vmxHCSetPendingXcptSS(pVCpu, 0);
6622 else
6623 vmxHCSetPendingXcptGP(pVCpu, 0);
6624 return VINF_HM_PENDING_XCPT;
6625 }
6626 }
6627 }
6628 else
6629 {
6630 /* Check permissions for the code segment. */
6631 if ( enmMemAccess == VMXMEMACCESS_WRITE
6632 || ( enmMemAccess == VMXMEMACCESS_READ
6633 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6634 {
6635 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6636 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6637 vmxHCSetPendingXcptGP(pVCpu, 0);
6638 return VINF_HM_PENDING_XCPT;
6639 }
6640
6641 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6642 if ( GCPtrFirst32 > pSel->u32Limit
6643 || GCPtrLast32 > pSel->u32Limit)
6644 {
6645 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6646 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6647 if (iSegReg == X86_SREG_SS)
6648 vmxHCSetPendingXcptSS(pVCpu, 0);
6649 else
6650 vmxHCSetPendingXcptGP(pVCpu, 0);
6651 return VINF_HM_PENDING_XCPT;
6652 }
6653 }
6654 }
6655 else
6656 {
6657 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6658 vmxHCSetPendingXcptGP(pVCpu, 0);
6659 return VINF_HM_PENDING_XCPT;
6660 }
6661
6662 *pGCPtrMem = GCPtrMem;
6663 return VINF_SUCCESS;
6664}
6665#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6666
6667
6668/**
6669 * VM-exit helper for LMSW.
6670 */
6671static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6672{
6673 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6674 AssertRCReturn(rc, rc);
6675
6676 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6677 AssertMsg( rcStrict == VINF_SUCCESS
6678 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6679
6680 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6681 if (rcStrict == VINF_IEM_RAISED_XCPT)
6682 {
6683 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6684 rcStrict = VINF_SUCCESS;
6685 }
6686
6687 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6688 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6689 return rcStrict;
6690}
6691
6692
6693/**
6694 * VM-exit helper for CLTS.
6695 */
6696static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6697{
6698 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6699 AssertRCReturn(rc, rc);
6700
6701 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6702 AssertMsg( rcStrict == VINF_SUCCESS
6703 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6704
6705 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6706 if (rcStrict == VINF_IEM_RAISED_XCPT)
6707 {
6708 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6709 rcStrict = VINF_SUCCESS;
6710 }
6711
6712 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6713 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6714 return rcStrict;
6715}
6716
6717
6718/**
6719 * VM-exit helper for MOV from CRx (CRx read).
6720 */
6721static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6722{
6723 Assert(iCrReg < 16);
6724 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6725
6726 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6727 AssertRCReturn(rc, rc);
6728
6729 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6730 AssertMsg( rcStrict == VINF_SUCCESS
6731 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6732
6733 if (iGReg == X86_GREG_xSP)
6734 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6735 else
6736 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6737#ifdef VBOX_WITH_STATISTICS
6738 switch (iCrReg)
6739 {
6740 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6741 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6742 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6743 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6744 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6745 }
6746#endif
6747 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6748 return rcStrict;
6749}
6750
6751
6752/**
6753 * VM-exit helper for MOV to CRx (CRx write).
6754 */
6755static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6756{
6757 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6758
6759 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6760 AssertMsg( rcStrict == VINF_SUCCESS
6761 || rcStrict == VINF_IEM_RAISED_XCPT
6762 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6763
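    /* Flag the context bits the write may have dirtied so they are written back to the VMCS
       before the next VM-entry. */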
6764 switch (iCrReg)
6765 {
6766 case 0:
6767 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6768 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6769 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6770 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6771 break;
6772
6773 case 2:
6774 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6775             /* Nothing to do here, CR2 is not part of the VMCS. */
6776 break;
6777
6778 case 3:
6779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6780 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6781 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6782 break;
6783
6784 case 4:
6785 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6786 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6787#ifndef IN_NEM_DARWIN
6788 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6789 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6790#else
6791 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6792#endif
6793 break;
6794
6795 case 8:
6796 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6797 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6798 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6799 break;
6800
6801 default:
6802 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6803 break;
6804 }
6805
6806 if (rcStrict == VINF_IEM_RAISED_XCPT)
6807 {
6808 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6809 rcStrict = VINF_SUCCESS;
6810 }
6811 return rcStrict;
6812}
6813
6814
6815/**
6816 * VM-exit exception handler for \#PF (Page-fault exception).
6817 *
6818 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6819 */
6820static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6821{
6822 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6823 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
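    /* For #PF VM-exits the Exit qualification holds the faulting linear address (the would-be CR2 value). */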
6824
6825#ifndef IN_NEM_DARWIN
6826 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6827 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6828 { /* likely */ }
6829 else
6830#endif
6831 {
6832#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6833 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6834#endif
6835 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6836 if (!pVmxTransient->fVectoringDoublePF)
6837 {
6838 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6839 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6840 }
6841 else
6842 {
6843 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6844 Assert(!pVmxTransient->fIsNestedGuest);
6845 vmxHCSetPendingXcptDF(pVCpu);
6846 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6847 }
6848 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6849 return VINF_SUCCESS;
6850 }
6851
6852 Assert(!pVmxTransient->fIsNestedGuest);
6853
6854     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6855 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6856 if (pVmxTransient->fVectoringPF)
6857 {
6858 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6859 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6860 }
6861
6862 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6863 AssertRCReturn(rc, rc);
6864
6865 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6866 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6867
6868 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6869 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6870
6871 Log4Func(("#PF: rc=%Rrc\n", rc));
6872 if (rc == VINF_SUCCESS)
6873 {
6874 /*
6875          * This is typically a shadow page table sync or an MMIO instruction. But we may have
6876 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6877 */
6878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6879 TRPMResetTrap(pVCpu);
6880 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6881 return rc;
6882 }
6883
6884 if (rc == VINF_EM_RAW_GUEST_TRAP)
6885 {
6886 if (!pVmxTransient->fVectoringDoublePF)
6887 {
6888 /* It's a guest page fault and needs to be reflected to the guest. */
6889 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6890 TRPMResetTrap(pVCpu);
6891 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6892 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6893 uGstErrorCode, pVmxTransient->uExitQual);
6894 }
6895 else
6896 {
6897 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6898 TRPMResetTrap(pVCpu);
6899 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6900 vmxHCSetPendingXcptDF(pVCpu);
6901 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6902 }
6903
6904 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6905 return VINF_SUCCESS;
6906 }
6907
6908 TRPMResetTrap(pVCpu);
6909 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6910 return rc;
6911}
6912
6913
6914/**
6915 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6916 *
6917 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6918 */
6919static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6920{
6921 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6922 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6923
6924 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6925 AssertRCReturn(rc, rc);
6926
6927 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6928 {
6929 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6930 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6931
6932 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6933      *        provides VM-exit instruction length. If this causes problems later,
6934 * disassemble the instruction like it's done on AMD-V. */
6935 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6936 AssertRCReturn(rc2, rc2);
6937 return rc;
6938 }
6939
6940 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6941 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6942 return VINF_SUCCESS;
6943}
6944
6945
6946/**
6947 * VM-exit exception handler for \#BP (Breakpoint exception).
6948 *
6949 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6950 */
6951static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6952{
6953 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6954 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6955
6956 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6957 AssertRCReturn(rc, rc);
6958
6959 VBOXSTRICTRC rcStrict;
6960 if (!pVmxTransient->fIsNestedGuest)
6961 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
6962 else
6963 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6964
6965 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6966 {
6967 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6968 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6969 rcStrict = VINF_SUCCESS;
6970 }
6971
6972 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6973 return rcStrict;
6974}
6975
6976
6977/**
6978 * VM-exit exception handler for \#AC (Alignment-check exception).
6979 *
6980 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6981 */
6982static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6983{
6984 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6985
6986      * Detect #ACs caused by the host having enabled split-lock detection.
6987 * Detect #ACs caused by host having enabled split-lock detection.
6988 * Emulate such instructions.
6989 */
6990#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6991 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6992 AssertRCReturn(rc, rc);
6993 /** @todo detect split lock in cpu feature? */
6994 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6995 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6996 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6997 || CPUMGetGuestCPL(pVCpu) != 3
6998         /* 3. When EFLAGS.AC is clear (0), this can only be a split-lock case. */
6999 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7000 {
7001 /*
7002 * Check for debug/trace events and import state accordingly.
7003 */
7004 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7005 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7006 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7007#ifndef IN_NEM_DARWIN
7008 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7009#endif
7010 )
7011 {
7012 if (pVM->cCpus == 1)
7013 {
7014#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7015 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7016 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7017#else
7018 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7019 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7020#endif
7021 AssertRCReturn(rc, rc);
7022 }
7023 }
7024 else
7025 {
7026 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7027 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7028 AssertRCReturn(rc, rc);
7029
7030 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7031
7032 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7033 {
7034 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7035 if (rcStrict != VINF_SUCCESS)
7036 return rcStrict;
7037 }
7038 }
7039
7040 /*
7041 * Emulate the instruction.
7042 *
7043 * We have to ignore the LOCK prefix here as we must not retrigger the
7044 * detection on the host. This isn't all that satisfactory, though...
7045 */
7046 if (pVM->cCpus == 1)
7047 {
7048 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7049 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7050
7051 /** @todo For SMP configs we should do a rendezvous here. */
7052 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7053 if (rcStrict == VINF_SUCCESS)
7054#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7055 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7056 HM_CHANGED_GUEST_RIP
7057 | HM_CHANGED_GUEST_RFLAGS
7058 | HM_CHANGED_GUEST_GPRS_MASK
7059 | HM_CHANGED_GUEST_CS
7060 | HM_CHANGED_GUEST_SS);
7061#else
7062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7063#endif
7064 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7065 {
7066 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7067 rcStrict = VINF_SUCCESS;
7068 }
7069 return rcStrict;
7070 }
7071 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7072 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7073 return VINF_EM_EMULATE_SPLIT_LOCK;
7074 }
7075
7076 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7077 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7078 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7079
7080 /* Re-inject it. We'll detect any nesting before getting here. */
7081 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7082 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7083 return VINF_SUCCESS;
7084}
7085
7086
7087/**
7088 * VM-exit exception handler for \#DB (Debug exception).
7089 *
7090 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7091 */
7092static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7093{
7094 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7095 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7096
7097 /*
7098 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7099 */
7100 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7101
7102    /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
7103 uint64_t const uDR6 = X86_DR6_INIT_VAL
7104 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7105 | X86_DR6_BD | X86_DR6_BS));
7106
7107 int rc;
7108 if (!pVmxTransient->fIsNestedGuest)
7109 {
7110 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7111
7112 /*
7113 * Prevents stepping twice over the same instruction when the guest is stepping using
7114 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7115 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7116 */
7117 if ( rc == VINF_EM_DBG_STEPPED
7118 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7119 {
7120 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7121 rc = VINF_EM_RAW_GUEST_TRAP;
7122 }
7123 }
7124 else
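        /* Nested-guest: DBGF is not involved here; treat it as a guest trap so the #DB is reflected below. */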
7125 rc = VINF_EM_RAW_GUEST_TRAP;
7126 Log6Func(("rc=%Rrc\n", rc));
7127 if (rc == VINF_EM_RAW_GUEST_TRAP)
7128 {
7129 /*
7130 * The exception was for the guest. Update DR6, DR7.GD and
7131 * IA32_DEBUGCTL.LBR before forwarding it.
7132 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7133 */
7134#ifndef IN_NEM_DARWIN
7135 VMMRZCallRing3Disable(pVCpu);
7136 HM_DISABLE_PREEMPT(pVCpu);
7137
7138 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7139 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7140 if (CPUMIsGuestDebugStateActive(pVCpu))
7141 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7142
7143 HM_RESTORE_PREEMPT();
7144 VMMRZCallRing3Enable(pVCpu);
7145#else
7146 /** @todo */
7147#endif
7148
7149 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7150 AssertRCReturn(rc, rc);
7151
7152 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7153 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7154
7155 /* Paranoia. */
7156 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7157 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7158
7159 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7160 AssertRC(rc);
7161
7162 /*
7163 * Raise #DB in the guest.
7164 *
7165 * It is important to reflect exactly what the VM-exit gave us (preserving the
7166 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7167 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7168 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7169 *
7170     * Intel re-documented ICEBP/INT1 in May 2018 (it was previously only documented as
7171     * part of the Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7172 */
7173 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7174 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7175 return VINF_SUCCESS;
7176 }
7177
7178 /*
7179 * Not a guest trap, must be a hypervisor related debug event then.
7180 * Update DR6 in case someone is interested in it.
7181 */
7182 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7183 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7184 CPUMSetHyperDR6(pVCpu, uDR6);
7185
7186 return rc;
7187}
7188
7189
7190/**
7191 * Hacks its way around the lovely mesa driver's backdoor accesses.
7192 *
7193 * @sa hmR0SvmHandleMesaDrvGp.
7194 */
7195static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7196{
7197 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7198 RT_NOREF(pCtx);
7199
7200 /* For now we'll just skip the instruction. */
7201 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7202}
7203
7204
7205/**
7206 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7207 * backdoor logging w/o checking what it is running inside.
7208 *
7209 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7210 * backdoor port and magic numbers loaded in registers.
7211 *
7212 * @returns true if it is, false if it isn't.
7213 * @sa hmR0SvmIsMesaDrvGp.
7214 */
7215DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7216{
7217 /* 0xed: IN eAX,dx */
7218 uint8_t abInstr[1];
7219 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7220 return false;
7221
7222 /* Check that it is #GP(0). */
7223 if (pVmxTransient->uExitIntErrorCode != 0)
7224 return false;
7225
7226 /* Check magic and port. */
7227 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7228 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
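    /* Backdoor magic: EAX must hold 'VMXh' (0x564d5868) and DX the VMware backdoor I/O port (0x5658). */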
7229 if (pCtx->rax != UINT32_C(0x564d5868))
7230 return false;
7231 if (pCtx->dx != UINT32_C(0x5658))
7232 return false;
7233
7234 /* Flat ring-3 CS. */
7235 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7236 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7237 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7238 if (pCtx->cs.Attr.n.u2Dpl != 3)
7239 return false;
7240 if (pCtx->cs.u64Base != 0)
7241 return false;
7242
7243 /* Check opcode. */
7244 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7245 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7246 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7247 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7248 if (RT_FAILURE(rc))
7249 return false;
7250 if (abInstr[0] != 0xed)
7251 return false;
7252
7253 return true;
7254}
7255
7256
7257/**
7258 * VM-exit exception handler for \#GP (General-protection exception).
7259 *
7260 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7261 */
7262static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7263{
7264 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7265 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7266
7267 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7268 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7269#ifndef IN_NEM_DARWIN
7270 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7271 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7272 { /* likely */ }
7273 else
7274#endif
7275 {
7276#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7277# ifndef IN_NEM_DARWIN
7278 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7279# else
7280 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7281# endif
7282#endif
7283 /*
7284 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7285 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7286 */
7287 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7288 AssertRCReturn(rc, rc);
7289 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7290 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7291
7292 if ( pVmxTransient->fIsNestedGuest
7293 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7294 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7295 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7296 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7297 else
7298 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7299 return rc;
7300 }
7301
7302#ifndef IN_NEM_DARWIN
7303 Assert(CPUMIsGuestInRealModeEx(pCtx));
7304 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7305 Assert(!pVmxTransient->fIsNestedGuest);
7306
7307 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7308 AssertRCReturn(rc, rc);
7309
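    /* Real-on-v86 guest without unrestricted execution: emulate the faulting instruction using IEM. */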
7310 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7311 if (rcStrict == VINF_SUCCESS)
7312 {
7313 if (!CPUMIsGuestInRealModeEx(pCtx))
7314 {
7315 /*
7316 * The guest is no longer in real-mode, check if we can continue executing the
7317 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7318 */
7319 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7320 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7321 {
7322 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7323 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7324 }
7325 else
7326 {
7327 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7328 rcStrict = VINF_EM_RESCHEDULE;
7329 }
7330 }
7331 else
7332 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7333 }
7334 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7335 {
7336 rcStrict = VINF_SUCCESS;
7337 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7338 }
7339 return VBOXSTRICTRC_VAL(rcStrict);
7340#endif
7341}
7342
7343
7344/**
7345 * VM-exit exception handler for \#DE (Divide Error).
7346 *
7347 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7348 */
7349static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7350{
7351 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7352 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7353
7354 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7355 AssertRCReturn(rc, rc);
7356
7357 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7358 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7359 {
7360 uint8_t cbInstr = 0;
7361 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7362 if (rc2 == VINF_SUCCESS)
7363 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7364 else if (rc2 == VERR_NOT_FOUND)
7365 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7366 else
7367 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7368 }
7369 else
7370 rcStrict = VINF_SUCCESS; /* Do nothing. */
7371
7372 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7373 if (RT_FAILURE(rcStrict))
7374 {
7375 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7376 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7377 rcStrict = VINF_SUCCESS;
7378 }
7379
7380 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7381 return VBOXSTRICTRC_VAL(rcStrict);
7382}
7383
7384
7385/**
7386 * VM-exit exception handler wrapper for all other exceptions that are not handled
7387 * by a specific handler.
7388 *
7389 * This simply re-injects the exception back into the VM without any special
7390 * processing.
7391 *
7392 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7393 */
7394static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7395{
7396 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7397
7398#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7399# ifndef IN_NEM_DARWIN
7400 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7401 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7402 ("uVector=%#x u32XcptBitmap=%#X32\n",
7403 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7404 NOREF(pVmcsInfo);
7405# endif
7406#endif
7407
7408 /*
7409 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7410 * would have been handled while checking exits due to event delivery.
7411 */
7412 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7413
7414#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7415 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7416 AssertRCReturn(rc, rc);
7417 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7418#endif
7419
7420#ifdef VBOX_WITH_STATISTICS
7421 switch (uVector)
7422 {
7423 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7424 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7425 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7426 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7427 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7428 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7429 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7430 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7431 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7432 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7433 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7434 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7435 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7436 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7437 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7438 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7439 default:
7440 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7441 break;
7442 }
7443#endif
7444
7445    /* We should never call this function for a page-fault; otherwise we'd need to pass on the fault address below. */
7446 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7447 NOREF(uVector);
7448
7449 /* Re-inject the original exception into the guest. */
7450 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7451 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7452 return VINF_SUCCESS;
7453}
7454
7455
7456/**
7457 * VM-exit exception handler for all exceptions (except NMIs!).
7458 *
7459 * @remarks This may be called for both guests and nested-guests. Take care to not
7460 * make assumptions and avoid doing anything that is not relevant when
7461 * executing a nested-guest (e.g., Mesa driver hacks).
7462 */
7463static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7464{
7465 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7466
7467 /*
7468 * If this VM-exit occurred while delivering an event through the guest IDT, take
7469 * action based on the return code and additional hints (e.g. for page-faults)
7470 * that will be updated in the VMX transient structure.
7471 */
7472 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7473 if (rcStrict == VINF_SUCCESS)
7474 {
7475 /*
7476 * If an exception caused a VM-exit due to delivery of an event, the original
7477 * event may have to be re-injected into the guest. We shall reinject it and
7478 * continue guest execution. However, page-fault is a complicated case and
7479 * needs additional processing done in vmxHCExitXcptPF().
7480 */
7481 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7482 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7483 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7484 || uVector == X86_XCPT_PF)
7485 {
7486 switch (uVector)
7487 {
7488 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7489 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7490 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7491 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7492 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7493 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7494 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7495 default:
7496 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7497 }
7498 }
7499 /* else: inject pending event before resuming guest execution. */
7500 }
7501 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7502 {
7503 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7504 rcStrict = VINF_SUCCESS;
7505 }
7506
7507 return rcStrict;
7508}
7509/** @} */
7510
7511
7512/** @name VM-exit handlers.
7513 * @{
7514 */
7515/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7516/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7517/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7518
7519/**
7520 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7521 */
7522HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7523{
7524 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7525 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7526
7527#ifndef IN_NEM_DARWIN
7528 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7529 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7530 return VINF_SUCCESS;
7531 return VINF_EM_RAW_INTERRUPT;
7532#else
7533 return VINF_SUCCESS;
7534#endif
7535}
7536
7537
7538/**
7539 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7540 * VM-exit.
7541 */
7542HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7543{
7544 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7545 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7546
7547 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7548
7549 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7550 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7551 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7552
7553 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7554 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7555 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7556 NOREF(pVmcsInfo);
7557
7558 VBOXSTRICTRC rcStrict;
7559 switch (uExitIntType)
7560 {
7561#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7562 /*
7563 * Host physical NMIs:
7564 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7565 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7566 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7567 *
7568 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7569 * See Intel spec. 27.5.5 "Updating Non-Register State".
7570 */
7571 case VMX_EXIT_INT_INFO_TYPE_NMI:
7572 {
7573 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7574 break;
7575 }
7576#endif
7577
7578 /*
7579 * Privileged software exceptions (#DB from ICEBP),
7580 * Software exceptions (#BP and #OF),
7581 * Hardware exceptions:
7582 * Process the required exceptions and resume guest execution if possible.
7583 */
7584 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7585 Assert(uVector == X86_XCPT_DB);
7586 RT_FALL_THRU();
7587 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7588 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7589 RT_FALL_THRU();
7590 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7591 {
7592 NOREF(uVector);
7593 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7594 | HMVMX_READ_EXIT_INSTR_LEN
7595 | HMVMX_READ_IDT_VECTORING_INFO
7596 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7597 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7598 break;
7599 }
7600
7601 default:
7602 {
7603 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7604 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7605 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7606 break;
7607 }
7608 }
7609
7610 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7611 return rcStrict;
7612}
7613
7614
7615/**
7616 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7617 */
7618HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7619{
7620 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7621
7622    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7623 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7624 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7625
7626 /* Evaluate and deliver pending events and resume guest execution. */
7627 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7628 return VINF_SUCCESS;
7629}
7630
7631
7632/**
7633 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7634 */
7635HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7636{
7637 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7638
7639 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7640 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7641 {
7642 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7643 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7644 }
7645
7646 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7647
7648 /*
7649 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7650 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7651 */
7652 uint32_t fIntrState;
7653 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7654 AssertRC(rc);
7655 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7656 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7657 {
7658 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7659
7660 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7661 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7662 AssertRC(rc);
7663 }
7664
7665    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7666 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7667
7668 /* Evaluate and deliver pending events and resume guest execution. */
7669 return VINF_SUCCESS;
7670}
7671
7672
7673/**
7674 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7675 */
7676HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7677{
7678 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7679 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7680}
7681
7682
7683/**
7684 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7685 */
7686HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7687{
7688 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7689 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7690}
7691
7692
7693/**
7694 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7695 */
7696HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7697{
7698 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7699
7700 /*
7701 * Get the state we need and update the exit history entry.
7702 */
7703 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7704 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7705 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7706 AssertRCReturn(rc, rc);
7707
7708 VBOXSTRICTRC rcStrict;
7709 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7710 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7711 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7712 if (!pExitRec)
7713 {
7714 /*
7715 * Regular CPUID instruction execution.
7716 */
7717 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7718 if (rcStrict == VINF_SUCCESS)
7719 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7720 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7721 {
7722 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7723 rcStrict = VINF_SUCCESS;
7724 }
7725 }
7726 else
7727 {
7728 /*
7729 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7730 */
7731 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7732 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7733 AssertRCReturn(rc2, rc2);
7734
7735 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7736 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7737
7738 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7739 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7740
7741 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7742 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7743 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7744 }
7745 return rcStrict;
7746}
7747
7748
7749/**
7750 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7751 */
7752HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7753{
7754 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7755
7756 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7757 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7758 AssertRCReturn(rc, rc);
7759
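    /* GETSEC raises #UD when CR4.SMXE is clear, so this VM-exit is only expected with SMXE set; let ring-3 emulate it in that case. */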
7760 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7761 return VINF_EM_RAW_EMULATE_INSTR;
7762
7763 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7764 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7765}
7766
7767
7768/**
7769 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7770 */
7771HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7772{
7773 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7774
7775 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7776 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7777 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7778 AssertRCReturn(rc, rc);
7779
7780 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7781 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7782 {
7783 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7784 we must reset offsetting on VM-entry. See @bugref{6634}. */
7785 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7786 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7788 }
7789 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7790 {
7791 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7792 rcStrict = VINF_SUCCESS;
7793 }
7794 return rcStrict;
7795}
7796
7797
7798/**
7799 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7800 */
7801HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7802{
7803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7804
7805 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7806 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
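    /* RDTSCP also returns IA32_TSC_AUX in ECX, hence the additional CPUMCTX_EXTRN_TSC_AUX import below. */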
7807 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7808 AssertRCReturn(rc, rc);
7809
7810 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7811 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7812 {
7813 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7814 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7815 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7816 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7818 }
7819 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7820 {
7821 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7822 rcStrict = VINF_SUCCESS;
7823 }
7824 return rcStrict;
7825}
7826
7827
7828/**
7829 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7830 */
7831HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7832{
7833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7834
7835 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7836 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7837 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7838 AssertRCReturn(rc, rc);
7839
7840 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7841 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7842 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7843 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7844 {
7845 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7846 rcStrict = VINF_SUCCESS;
7847 }
7848 return rcStrict;
7849}
7850
7851
7852/**
7853 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7854 */
7855HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7856{
7857 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7858
7859 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7860 if (EMAreHypercallInstructionsEnabled(pVCpu))
7861 {
7862 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7863 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7864 | CPUMCTX_EXTRN_RFLAGS
7865 | CPUMCTX_EXTRN_CR0
7866 | CPUMCTX_EXTRN_SS
7867 | CPUMCTX_EXTRN_CS
7868 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7869 AssertRCReturn(rc, rc);
7870
7871 /* Perform the hypercall. */
7872 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7873 if (rcStrict == VINF_SUCCESS)
7874 {
7875 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7876 AssertRCReturn(rc, rc);
7877 }
7878 else
7879 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7880 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7881 || RT_FAILURE(rcStrict));
7882
7883 /* If the hypercall changes anything other than guest's general-purpose registers,
7884 we would need to reload the guest changed bits here before VM-entry. */
7885 }
7886 else
7887 Log4Func(("Hypercalls not enabled\n"));
7888
7889 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7890 if (RT_FAILURE(rcStrict))
7891 {
7892 vmxHCSetPendingXcptUD(pVCpu);
7893 rcStrict = VINF_SUCCESS;
7894 }
7895
7896 return rcStrict;
7897}
7898
7899
7900/**
7901 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7902 */
7903HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7904{
7905 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7906#ifndef IN_NEM_DARWIN
7907 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7908#endif
7909
7910 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7911 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7912 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7913 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7914 AssertRCReturn(rc, rc);
7915
7916 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7917
7918 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7919 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7920 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7921 {
7922 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7923 rcStrict = VINF_SUCCESS;
7924 }
7925 else
7926 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7927 VBOXSTRICTRC_VAL(rcStrict)));
7928 return rcStrict;
7929}
7930
7931
7932/**
7933 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7934 */
7935HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7936{
7937 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7938
7939 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7940 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7941 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7942 AssertRCReturn(rc, rc);
7943
7944 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7945 if (rcStrict == VINF_SUCCESS)
7946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7947 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7948 {
7949 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7950 rcStrict = VINF_SUCCESS;
7951 }
7952
7953 return rcStrict;
7954}
7955
7956
7957/**
7958 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7959 */
7960HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7961{
7962 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7963
7964 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7965 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7966 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7967 AssertRCReturn(rc, rc);
7968
7969 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7970 if (RT_SUCCESS(rcStrict))
7971 {
7972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
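        /* IEM may return VINF_EM_HALT here; keep executing the guest if EM determines the MWAIT need not block. */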
7973 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7974 rcStrict = VINF_SUCCESS;
7975 }
7976
7977 return rcStrict;
7978}
7979
7980
7981/**
7982 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7983 * VM-exit.
7984 */
7985HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7986{
7987 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7988 return VINF_EM_RESET;
7989}
7990
7991
7992/**
7993 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7994 */
7995HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7996{
7997 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7998
7999 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8000 AssertRCReturn(rc, rc);
8001
8002 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8003 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8004 rc = VINF_SUCCESS;
8005 else
8006 rc = VINF_EM_HALT;
8007
8008 if (rc != VINF_SUCCESS)
8009 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8010 return rc;
8011}
8012
8013
8014#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8015/**
8016 * VM-exit handler for instructions that result in a \#UD exception delivered to
8017 * the guest.
8018 */
8019HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8020{
8021 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8022 vmxHCSetPendingXcptUD(pVCpu);
8023 return VINF_SUCCESS;
8024}
8025#endif
8026
8027
8028/**
8029 * VM-exit handler for expiry of the VMX-preemption timer.
8030 */
8031HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8032{
8033 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8034
8035 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8036 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8037    Log12(("vmxHCExitPreemptTimer:\n"));
8038
8039 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8040 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8041 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8042 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8043 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8044}
8045
8046
8047/**
8048 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8049 */
8050HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8051{
8052 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8053
8054 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8055 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8056 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8057 AssertRCReturn(rc, rc);
8058
8059 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8061 : HM_CHANGED_RAISED_XCPT_MASK);
8062
8063#ifndef IN_NEM_DARWIN
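    /* The guest may have changed XCR0; re-check whether it needs swapping on VM-entry/exit and re-select the start-VM function if that changed. */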
8064 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8065 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8066 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8067 {
8068 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8069 hmR0VmxUpdateStartVmFunction(pVCpu);
8070 }
8071#endif
8072
8073 return rcStrict;
8074}
8075
8076
8077/**
8078 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8079 */
8080HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8081{
8082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8083
8084    /** @todo Enable the new code after finding a reliable guest test-case. */
8085#if 1
8086 return VERR_EM_INTERPRETER;
8087#else
8088 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8089 | HMVMX_READ_EXIT_INSTR_INFO
8090 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8091 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8092 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8093 AssertRCReturn(rc, rc);
8094
8095 /* Paranoia. Ensure this has a memory operand. */
8096 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8097
8098 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8099 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8100 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8101 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8102
8103 RTGCPTR GCPtrDesc;
8104 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8105
8106 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8107 GCPtrDesc, uType);
8108 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8110 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8111 {
8112 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8113 rcStrict = VINF_SUCCESS;
8114 }
8115 return rcStrict;
8116#endif
8117}
8118
8119
8120/**
8121 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8122 * VM-exit.
8123 */
8124HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8125{
8126 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8127 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8128 AssertRCReturn(rc, rc);
8129
8130 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8131 if (RT_FAILURE(rc))
8132 return rc;
8133
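    /* Run our own guest-state sanity checks to pinpoint what is invalid; the result is only used for logging. */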
8134 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8135 NOREF(uInvalidReason);
8136
8137#ifdef VBOX_STRICT
8138 uint32_t fIntrState;
8139 uint64_t u64Val;
8140 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8141 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8142 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8143
8144 Log4(("uInvalidReason %u\n", uInvalidReason));
8145 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8146 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8147 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8148
8149 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8150 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8151 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8152 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8153 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8154 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8155 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8156    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8157 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8158 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8159 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8160 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8161# ifndef IN_NEM_DARWIN
8162 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8163 {
8164 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8165 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8166 }
8167
8168 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8169# endif
8170#endif
8171
8172 return VERR_VMX_INVALID_GUEST_STATE;
8173}
8174
8175/**
8176 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8177 */
8178HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8179{
8180 /*
8181 * Cumulative notes of all recognized but unexpected VM-exits.
8182 *
8183 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8184 * nested-paging is used.
8185 *
8186 *    2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8187 *       emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8188 *       this function (and thereby stopping VM execution) for handling such instructions.
8189 *
8190 *
8191 * VMX_EXIT_INIT_SIGNAL:
8192 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8193 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8194 *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8195 *
8196 *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8197 *    See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8198 *    See Intel spec. 23.8 "Restrictions on VMX Operation".
8199 *
8200 * VMX_EXIT_SIPI:
8201 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8202 * activity state is used. We don't make use of it as our guests don't have direct
8203 * access to the host local APIC.
8204 *
8205 * See Intel spec. 25.3 "Other Causes of VM-exits".
8206 *
8207 * VMX_EXIT_IO_SMI:
8208 * VMX_EXIT_SMI:
8209 * This can only happen if we support dual-monitor treatment of SMI, which can be
8210 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8211 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8212 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8213 *
8214 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8215 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8216 *
8217 * VMX_EXIT_ERR_MSR_LOAD:
8218 *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8219 *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8220 *    execution.
8221 *
8222 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8223 *
8224 * VMX_EXIT_ERR_MACHINE_CHECK:
8225 *    A machine-check exception indicates a fatal/unrecoverable hardware condition
8226 *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8227 *    abort-class #MC exception is raised. We thus cannot assume a
8228 * reasonable chance of continuing any sort of execution and we bail.
8229 *
8230 * See Intel spec. 15.1 "Machine-check Architecture".
8231 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8232 *
8233 * VMX_EXIT_PML_FULL:
8234 * VMX_EXIT_VIRTUALIZED_EOI:
8235 * VMX_EXIT_APIC_WRITE:
8236 * We do not currently support any of these features and thus they are all unexpected
8237 * VM-exits.
8238 *
8239 * VMX_EXIT_GDTR_IDTR_ACCESS:
8240 * VMX_EXIT_LDTR_TR_ACCESS:
8241 * VMX_EXIT_RDRAND:
8242 * VMX_EXIT_RSM:
8243 * VMX_EXIT_VMFUNC:
8244 * VMX_EXIT_ENCLS:
8245 * VMX_EXIT_RDSEED:
8246 * VMX_EXIT_XSAVES:
8247 * VMX_EXIT_XRSTORS:
8248 * VMX_EXIT_UMWAIT:
8249 * VMX_EXIT_TPAUSE:
8250 * VMX_EXIT_LOADIWKEY:
8251 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8252 *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8253 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8254 *
8255 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8256 */
8257 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8258 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8259 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8260}
8261
8262
8263/**
8264 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8265 */
8266HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8267{
8268 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8269
8270 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8271
8272 /** @todo Optimize this: We currently drag in the whole MSR state
8273 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8274 * MSRs required. That would require changes to IEM and possibly CPUM too.
8275     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8276 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8277 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8278 int rc;
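    /* The FS and GS base MSRs are not covered by CPUMCTX_EXTRN_ALL_MSRS, so additionally import the full segment register for those. */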
8279 switch (idMsr)
8280 {
8281 default:
8282 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8283 __FUNCTION__);
8284 AssertRCReturn(rc, rc);
8285 break;
8286 case MSR_K8_FS_BASE:
8287 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8288 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8289 AssertRCReturn(rc, rc);
8290 break;
8291 case MSR_K8_GS_BASE:
8292 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8293 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8294 AssertRCReturn(rc, rc);
8295 break;
8296 }
8297
8298 Log4Func(("ecx=%#RX32\n", idMsr));
8299
8300#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8301 Assert(!pVmxTransient->fIsNestedGuest);
8302 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8303 {
8304 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8305 && idMsr != MSR_K6_EFER)
8306 {
8307 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8308 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8309 }
8310 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8311 {
8312 Assert(pVmcsInfo->pvMsrBitmap);
8313 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8314 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8315 {
8316 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8317 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8318 }
8319 }
8320 }
8321#endif
8322
8323 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8324 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8325 if (rcStrict == VINF_SUCCESS)
8326 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8327 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8328 {
8329 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8330 rcStrict = VINF_SUCCESS;
8331 }
8332 else
8333 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8334 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8335
8336 return rcStrict;
8337}
8338
8339
8340/**
8341 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8342 */
8343HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8344{
8345 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8346
8347 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8348
8349 /*
8350     * The FS and GS base MSRs are not part of the CPUMCTX_EXTRN_ALL_MSRS mask used below.
8351     * Although we don't need to fetch the base itself (it will be overwritten shortly), when
8352     * loading the guest state we load the entire segment register, including its limit and
8353     * attributes, and thus we need to import the full registers here.
8354 */
8355 /** @todo Optimize this: We currently drag in the whole MSR state
8356 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8357 * MSRs required. That would require changes to IEM and possibly CPUM too.
8358     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8359 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8360 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8361 int rc;
8362 switch (idMsr)
8363 {
8364 default:
8365 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8366 __FUNCTION__);
8367 AssertRCReturn(rc, rc);
8368 break;
8369
8370 case MSR_K8_FS_BASE:
8371 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8372 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8373 AssertRCReturn(rc, rc);
8374 break;
8375 case MSR_K8_GS_BASE:
8376 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8377 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8378 AssertRCReturn(rc, rc);
8379 break;
8380 }
8381 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8382
8383 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8384 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8385
8386 if (rcStrict == VINF_SUCCESS)
8387 {
8388 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8389
8390 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8391 if ( idMsr == MSR_IA32_APICBASE
8392 || ( idMsr >= MSR_IA32_X2APIC_START
8393 && idMsr <= MSR_IA32_X2APIC_END))
8394 {
8395 /*
8396 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8397 * When full APIC register virtualization is implemented we'll have to make
8398 * sure APIC state is saved from the VMCS before IEM changes it.
8399 */
8400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8401 }
8402 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8403 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8404 else if (idMsr == MSR_K6_EFER)
8405 {
8406 /*
8407 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8408 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8409 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8410 */
8411 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8412 }
8413
8414 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8415 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8416 {
8417 switch (idMsr)
8418 {
8419 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8420 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8421 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8422 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8423 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8424 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8425 default:
8426 {
8427#ifndef IN_NEM_DARWIN
8428 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8429 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8430 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8431 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8432#else
8433 AssertMsgFailed(("TODO\n"));
8434#endif
8435 break;
8436 }
8437 }
8438 }
8439#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8440 else
8441 {
8442 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8443 switch (idMsr)
8444 {
8445 case MSR_IA32_SYSENTER_CS:
8446 case MSR_IA32_SYSENTER_EIP:
8447 case MSR_IA32_SYSENTER_ESP:
8448 case MSR_K8_FS_BASE:
8449 case MSR_K8_GS_BASE:
8450 {
8451 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8452 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8453 }
8454
8455 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8456 default:
8457 {
8458 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8459 {
8460 /* EFER MSR writes are always intercepted. */
8461 if (idMsr != MSR_K6_EFER)
8462 {
8463 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8464 idMsr));
8465 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8466 }
8467 }
8468
8469 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8470 {
8471 Assert(pVmcsInfo->pvMsrBitmap);
8472 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8473 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8474 {
8475 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8476 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8477 }
8478 }
8479 break;
8480 }
8481 }
8482 }
8483#endif /* VBOX_STRICT */
8484 }
8485 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8486 {
8487 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8488 rcStrict = VINF_SUCCESS;
8489 }
8490 else
8491 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8492 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8493
8494 return rcStrict;
8495}
8496
8497
8498/**
8499 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8500 */
8501HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8502{
8503 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8504
8505     /** @todo The guest has likely hit a contended spinlock. We might want to
8506      *        poke or schedule a different guest VCPU. */
8507 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8508 if (RT_SUCCESS(rc))
8509 return VINF_EM_RAW_INTERRUPT;
8510
8511 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8512 return rc;
8513}
8514
8515
8516/**
8517 * VM-exit handler for when the TPR value is lowered below the specified
8518 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8519 */
8520HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8521{
8522 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8523 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8524
8525 /*
8526 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8527 * We'll re-evaluate pending interrupts and inject them before the next VM
8528 * entry so we can just continue execution here.
8529 */
8530 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8531 return VINF_SUCCESS;
8532}
8533
8534
8535/**
8536 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8537 * VM-exit.
8538 *
8539 * @retval VINF_SUCCESS when guest execution can continue.
8540 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8541 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8542 * incompatible guest state for VMX execution (real-on-v86 case).
8543 */
8544HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8545{
8546 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8547 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8548
8549 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8550 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8551 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8552
8553 VBOXSTRICTRC rcStrict;
8554 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8555 uint64_t const uExitQual = pVmxTransient->uExitQual;
8556 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8557 switch (uAccessType)
8558 {
8559 /*
8560 * MOV to CRx.
8561 */
8562 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8563 {
8564 /*
8565 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8566 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8567 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8568 * PAE PDPTEs as well.
8569 */
8570 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8571 AssertRCReturn(rc, rc);
8572
8573 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8574#ifndef IN_NEM_DARWIN
8575 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8576#endif
8577 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8578 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8579
8580 /*
8581             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8582 * - When nested paging isn't used.
8583 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8584 * - We are executing in the VM debug loop.
8585 */
8586#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8587# ifndef IN_NEM_DARWIN
8588 Assert( iCrReg != 3
8589 || !VM_IS_VMX_NESTED_PAGING(pVM)
8590 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8591 || pVCpu->hmr0.s.fUsingDebugLoop);
8592# else
8593 Assert( iCrReg != 3
8594 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8595# endif
8596#endif
8597
8598 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8599 Assert( iCrReg != 8
8600 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8601
8602 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8603 AssertMsg( rcStrict == VINF_SUCCESS
8604 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8605
8606#ifndef IN_NEM_DARWIN
8607 /*
8608 * This is a kludge for handling switches back to real mode when we try to use
8609 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8610 * deal with special selector values, so we have to return to ring-3 and run
8611 * there till the selector values are V86 mode compatible.
8612 *
8613 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8614 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8615 * this function.
8616 */
8617 if ( iCrReg == 0
8618 && rcStrict == VINF_SUCCESS
8619 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8620 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8621 && (uOldCr0 & X86_CR0_PE)
8622 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8623 {
8624 /** @todo Check selectors rather than returning all the time. */
8625 Assert(!pVmxTransient->fIsNestedGuest);
8626 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8627 rcStrict = VINF_EM_RESCHEDULE_REM;
8628 }
8629#endif
8630
8631 break;
8632 }
8633
8634 /*
8635 * MOV from CRx.
8636 */
8637 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8638 {
8639 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8640 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8641
8642 /*
8643             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8644 * - When nested paging isn't used.
8645 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8646 * - We are executing in the VM debug loop.
8647 */
8648#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8649# ifndef IN_NEM_DARWIN
8650 Assert( iCrReg != 3
8651 || !VM_IS_VMX_NESTED_PAGING(pVM)
8652 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8653 || pVCpu->hmr0.s.fLeaveDone);
8654# else
8655 Assert( iCrReg != 3
8656 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8657# endif
8658#endif
8659
8660 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8661 Assert( iCrReg != 8
8662 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8663
8664 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8665 break;
8666 }
8667
8668 /*
8669 * CLTS (Clear Task-Switch Flag in CR0).
8670 */
8671 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8672 {
8673 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8674 break;
8675 }
8676
8677 /*
8678 * LMSW (Load Machine-Status Word into CR0).
8679 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8680 */
8681 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8682 {
8683 RTGCPTR GCPtrEffDst;
8684 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8685 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8686 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8687 if (fMemOperand)
8688 {
8689 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8690 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8691 }
8692 else
8693 GCPtrEffDst = NIL_RTGCPTR;
8694 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8695 break;
8696 }
8697
8698 default:
8699 {
8700 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8701 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8702 }
8703 }
8704
8705 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8706 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8707 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8708
8709 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8710 NOREF(pVM);
8711 return rcStrict;
8712}
8713
8714
8715/**
8716 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8717 * VM-exit.
8718 */
8719HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8720{
8721 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8722 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8723
8724 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8725 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8726 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8727 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8728#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8729     /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8730 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8731 AssertRCReturn(rc, rc);
8732
8733     /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
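    /* For reference, a summary of the exit qualification layout (per the Intel SDM):
         bits 2:0   - size of access (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is not used),
         bit  3     - direction (0 = OUT, 1 = IN),
         bit  4     - string instruction (INS/OUTS),
         bit  5     - REP prefixed,
         bit  6     - operand encoding (0 = DX, 1 = immediate),
         bits 31:16 - port number.
       The VMX_EXIT_QUAL_IO_XXX macros below extract these fields. */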
8734 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8735 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8736 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8737 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8738 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8739 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8740 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8741
8742 /*
8743 * Update exit history to see if this exit can be optimized.
8744 */
8745 VBOXSTRICTRC rcStrict;
8746 PCEMEXITREC pExitRec = NULL;
8747 if ( !fGstStepping
8748 && !fDbgStepping)
8749 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8750 !fIOString
8751 ? !fIOWrite
8752 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8753 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8754 : !fIOWrite
8755 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8756 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8757 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8758 if (!pExitRec)
8759 {
8760 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8761 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
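        /* Index 2 is unused in both tables since uIOSize == 2 is rejected by the AssertReturn above, hence the zero placeholders. */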
8762
8763 uint32_t const cbValue = s_aIOSizes[uIOSize];
8764 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8765 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8766 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8767 if (fIOString)
8768 {
8769 /*
8770 * INS/OUTS - I/O String instruction.
8771 *
8772 * Use instruction-information if available, otherwise fall back on
8773 * interpreting the instruction.
8774 */
8775 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8776 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8777 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8778 if (fInsOutsInfo)
8779 {
8780 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8781 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8782 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8783 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8784 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8785 if (fIOWrite)
8786 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8787 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8788 else
8789 {
8790 /*
8791 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8792                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8793 * See Intel Instruction spec. for "INS".
8794 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8795 */
8796 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8797 }
8798 }
8799 else
8800 rcStrict = IEMExecOne(pVCpu);
8801
8802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8803 fUpdateRipAlready = true;
8804 }
8805 else
8806 {
8807 /*
8808 * IN/OUT - I/O instruction.
8809 */
8810 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8811 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8812 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8813 if (fIOWrite)
8814 {
8815 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8816 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8817#ifndef IN_NEM_DARWIN
8818 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8819 && !pCtx->eflags.Bits.u1TF)
8820 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8821#endif
8822 }
8823 else
8824 {
8825 uint32_t u32Result = 0;
8826 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8827 if (IOM_SUCCESS(rcStrict))
8828 {
8829 /* Save result of I/O IN instr. in AL/AX/EAX. */
8830 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8831 }
8832#ifndef IN_NEM_DARWIN
8833 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8834 && !pCtx->eflags.Bits.u1TF)
8835 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8836#endif
8837 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8838 }
8839 }
8840
8841 if (IOM_SUCCESS(rcStrict))
8842 {
8843 if (!fUpdateRipAlready)
8844 {
8845 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8846 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8847 }
8848
8849 /*
8850          * INS/OUTS with a REP prefix updates RFLAGS; this can be observed with a triple-fault guru
8851          * while booting a Fedora 17 64-bit guest.
8852 *
8853 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8854 */
8855 if (fIOString)
8856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8857
8858 /*
8859 * If any I/O breakpoints are armed, we need to check if one triggered
8860 * and take appropriate action.
8861 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8862 */
8863#if 1
8864 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8865#else
8866 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8867 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8868 AssertRCReturn(rc, rc);
8869#endif
8870
8871 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8872 * execution engines about whether hyper BPs and such are pending. */
8873 uint32_t const uDr7 = pCtx->dr[7];
8874 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8875 && X86_DR7_ANY_RW_IO(uDr7)
8876 && (pCtx->cr4 & X86_CR4_DE))
8877 || DBGFBpIsHwIoArmed(pVM)))
8878 {
8879 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8880
8881#ifndef IN_NEM_DARWIN
8882 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8883 VMMRZCallRing3Disable(pVCpu);
8884 HM_DISABLE_PREEMPT(pVCpu);
8885
8886 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
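            /* fIsGuestDbgActive means the guest's debug registers are currently loaded on the CPU, in which
               case DR6 must also be updated in hardware (ASMSetDR6 below) when raising #DB. */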
8887
8888 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8889 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8890 {
8891 /* Raise #DB. */
8892 if (fIsGuestDbgActive)
8893 ASMSetDR6(pCtx->dr[6]);
8894 if (pCtx->dr[7] != uDr7)
8895 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8896
8897 vmxHCSetPendingXcptDB(pVCpu);
8898 }
8899 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8900 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8901 else if ( rcStrict2 != VINF_SUCCESS
8902 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8903 rcStrict = rcStrict2;
8904 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8905
8906 HM_RESTORE_PREEMPT();
8907 VMMRZCallRing3Enable(pVCpu);
8908#else
8909 /** @todo */
8910#endif
8911 }
8912 }
8913
8914#ifdef VBOX_STRICT
8915 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8916 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8917 Assert(!fIOWrite);
8918 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8919 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8920 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8921 Assert(fIOWrite);
8922 else
8923 {
8924# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8925 * statuses, that the VMM device and some others may return. See
8926 * IOM_SUCCESS() for guidance. */
8927 AssertMsg( RT_FAILURE(rcStrict)
8928 || rcStrict == VINF_SUCCESS
8929 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8930 || rcStrict == VINF_EM_DBG_BREAKPOINT
8931 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8932 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8933# endif
8934 }
8935#endif
8936 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8937 }
8938 else
8939 {
8940 /*
8941 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8942 */
8943 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8944 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8945 AssertRCReturn(rc2, rc2);
8946 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8947 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8948 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8949 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8950 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8951 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8952
8953 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8954 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8955
8956 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8957 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8958 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8959 }
8960 return rcStrict;
8961}
8962
8963
8964/**
8965 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8966 * VM-exit.
8967 */
8968HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8969{
8970 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8971
8972     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8973 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8974 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8975 {
8976 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8977 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8978 {
8979 uint32_t uErrCode;
8980 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8981 {
8982 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8983 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8984 }
8985 else
8986 uErrCode = 0;
8987
8988 RTGCUINTPTR GCPtrFaultAddress;
8989 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8990 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8991 else
8992 GCPtrFaultAddress = 0;
8993
8994 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8995
8996 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8997 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8998
8999 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9000 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9001 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9002 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9003 }
9004 }
9005
9006 /* Fall back to the interpreter to emulate the task-switch. */
9007 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9008 return VERR_EM_INTERPRETER;
9009}
9010
9011
9012/**
9013 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9014 */
9015HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9016{
9017 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9018
9019 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
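    /* The monitor-trap flag is a one-shot single-stepping aid: clear the control here; it should get
       re-armed on the next VM-entry if single-instruction stepping continues. */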
9020 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9021 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9022 AssertRC(rc);
9023 return VINF_EM_DBG_STEPPED;
9024}
9025
9026
9027/**
9028 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9029 */
9030HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9031{
9032 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9033 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9034
9035 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9036 | HMVMX_READ_EXIT_INSTR_LEN
9037 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9038 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9039 | HMVMX_READ_IDT_VECTORING_INFO
9040 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9041
9042 /*
9043 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9044 */
9045 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9046 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9047 {
9048         /* If, for some crazy guest, an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9049 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9050 {
9051 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9052 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9053 }
9054 }
9055 else
9056 {
9057 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9058 return rcStrict;
9059 }
9060
9061     /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9062 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9063 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9064 AssertRCReturn(rc, rc);
9065
9066     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9067 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9068 switch (uAccessType)
9069 {
9070#ifndef IN_NEM_DARWIN
9071 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9072 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9073 {
9074 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9075 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9076 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9077
9078 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9079 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9080 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9081 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9082 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9083
9084 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9085 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9086 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9087 if ( rcStrict == VINF_SUCCESS
9088 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9089 || rcStrict == VERR_PAGE_NOT_PRESENT)
9090 {
9091 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9092 | HM_CHANGED_GUEST_APIC_TPR);
9093 rcStrict = VINF_SUCCESS;
9094 }
9095 break;
9096 }
9097#else
9098 /** @todo */
9099#endif
9100
9101 default:
9102 {
9103 Log4Func(("uAccessType=%#x\n", uAccessType));
9104 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9105 break;
9106 }
9107 }
9108
9109 if (rcStrict != VINF_SUCCESS)
9110 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9111 return rcStrict;
9112}
9113
9114
9115/**
9116 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9117 * VM-exit.
9118 */
9119HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9120{
9121 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9122 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9123
9124 /*
9125 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9126 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9127 * must emulate the MOV DRx access.
9128 */
9129 if (!pVmxTransient->fIsNestedGuest)
9130 {
9131 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9132 if (pVmxTransient->fWasGuestDebugStateActive)
9133 {
9134 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9135 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9136 }
9137
9138 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9139 && !pVmxTransient->fWasHyperDebugStateActive)
9140 {
9141 Assert(!DBGFIsStepping(pVCpu));
9142 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9143
9144 /* Don't intercept MOV DRx any more. */
9145 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9146 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9147 AssertRC(rc);
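            /* Once the guest debug state is loaded onto the CPU (below), MOV DRx can execute natively,
               so the intercept is no longer needed. */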
9148
9149#ifndef IN_NEM_DARWIN
9150 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9151 VMMRZCallRing3Disable(pVCpu);
9152 HM_DISABLE_PREEMPT(pVCpu);
9153
9154 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9155 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9156 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9157
9158 HM_RESTORE_PREEMPT();
9159 VMMRZCallRing3Enable(pVCpu);
9160#else
9161 CPUMR3NemActivateGuestDebugState(pVCpu);
9162 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9163 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9164#endif
9165
9166#ifdef VBOX_WITH_STATISTICS
9167 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9168 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9169 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9170 else
9171 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9172#endif
9173 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9174 return VINF_SUCCESS;
9175 }
9176 }
9177
9178 /*
9179 * Import state. We must have DR7 loaded here as it's always consulted,
9180 * both for reading and writing. The other debug registers are never
9181 * exported as such.
9182 */
9183 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9184 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9185 | CPUMCTX_EXTRN_GPRS_MASK
9186 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9187 AssertRCReturn(rc, rc);
9188 Log4Func(("cs:rip=%#04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9189
9190 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9191 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9192
9193 VBOXSTRICTRC rcStrict;
9194 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9195 {
9196 /*
9197 * Write DRx register.
9198 */
9199 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9200 AssertMsg( rcStrict == VINF_SUCCESS
9201 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9202
9203 if (rcStrict == VINF_SUCCESS)
9204 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9205 * kept it for now to avoid breaking something non-obvious. */
9206 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9207 | HM_CHANGED_GUEST_DR7);
9208 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9209 {
9210 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9211 rcStrict = VINF_SUCCESS;
9212 }
9213
9214 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9215 }
9216 else
9217 {
9218 /*
9219 * Read DRx register into a general purpose register.
9220 */
9221 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9222 AssertMsg( rcStrict == VINF_SUCCESS
9223 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9224
9225 if (rcStrict == VINF_SUCCESS)
9226 {
9227 if (iGReg == X86_GREG_xSP)
9228 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9229 | HM_CHANGED_GUEST_RSP);
9230 else
9231 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9232 }
9233 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9234 {
9235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9236 rcStrict = VINF_SUCCESS;
9237 }
9238
9239 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9240 }
9241
9242 return rcStrict;
9243}
9244
9245
9246/**
9247 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9248 * Conditional VM-exit.
9249 */
9250HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9251{
9252 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9253
9254#ifndef IN_NEM_DARWIN
9255 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9256
9257 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9258 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9259 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9260 | HMVMX_READ_IDT_VECTORING_INFO
9261 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9262 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9263
9264 /*
9265 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9266 */
9267 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9268 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9269 {
9270 /*
9271 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9272 * instruction emulation to inject the original event. Otherwise, injecting the original event
9273 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9274 */
9275 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9276 { /* likely */ }
9277 else
9278 {
9279 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9280# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9281 /** @todo NSTVMX: Think about how this should be handled. */
9282 if (pVmxTransient->fIsNestedGuest)
9283 return VERR_VMX_IPE_3;
9284# endif
9285 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9286 }
9287 }
9288 else
9289 {
9290 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9291 return rcStrict;
9292 }
9293
9294 /*
9295 * Get sufficient state and update the exit history entry.
9296 */
9297 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9298 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9299 AssertRCReturn(rc, rc);
9300
9301 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9302 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9303 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9304 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9305 if (!pExitRec)
9306 {
9307 /*
9308 * If we succeed, resume guest execution.
9309 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9310 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9311 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9312 * weird case. See @bugref{6043}.
9313 */
9314 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9315/** @todo bird: We can probably just go straight to IOM here and assume that
9316 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9317 * well. However, we need to address that aliasing workarounds that
9318 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9319 *
9320 * Might also be interesting to see if we can get this done more or
9321 * less locklessly inside IOM. Need to consider the lookup table
9322 * updating and use a bit more carefully first (or do all updates via
9323 * rendezvous) */
9324 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9325 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9326 if ( rcStrict == VINF_SUCCESS
9327 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9328 || rcStrict == VERR_PAGE_NOT_PRESENT)
9329 {
9330 /* Successfully handled MMIO operation. */
9331 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9332 | HM_CHANGED_GUEST_APIC_TPR);
9333 rcStrict = VINF_SUCCESS;
9334 }
9335 }
9336 else
9337 {
9338 /*
9339 * Frequent exit or something needing probing. Call EMHistoryExec.
9340 */
9341 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9342 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9343
9344 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9345 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9346
9347 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9348 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9349 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9350 }
9351 return rcStrict;
9352#else
9353 AssertFailed();
9354 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9355#endif
9356}
9357
9358
9359/**
9360 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9361 * VM-exit.
9362 */
9363HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9364{
9365 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9366#ifndef IN_NEM_DARWIN
9367 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9368
9369 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9370 | HMVMX_READ_EXIT_INSTR_LEN
9371 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9372 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9373 | HMVMX_READ_IDT_VECTORING_INFO
9374 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9375 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9376
9377 /*
9378 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9379 */
9380 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9381 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9382 {
9383 /*
9384 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9385 * we shall resolve the nested #PF and re-inject the original event.
9386 */
9387 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9388 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9389 }
9390 else
9391 {
9392 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9393 return rcStrict;
9394 }
9395
9396 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9397 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9398 AssertRCReturn(rc, rc);
9399
9400 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9401 uint64_t const uExitQual = pVmxTransient->uExitQual;
9402 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9403
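    /* Synthesize a #PF error code from the EPT-violation exit qualification: instruction fetches map to ID,
       data writes map to RW, and any EPT-entry permission bit being set means the translation was present (P). */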
9404 RTGCUINT uErrorCode = 0;
9405 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9406 uErrorCode |= X86_TRAP_PF_ID;
9407 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9408 uErrorCode |= X86_TRAP_PF_RW;
9409 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9410 uErrorCode |= X86_TRAP_PF_P;
9411
9412 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9413 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9414
9415 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9416
9417 /*
9418 * Handle the pagefault trap for the nested shadow table.
9419 */
9420 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9421 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9422 TRPMResetTrap(pVCpu);
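    /* The trap is asserted only around the PGM call (presumably so PGM has the #PF context available) and
       reset again afterwards; any event that needs (re-)injecting is handled via HM's own pending-event state. */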
9423
9424 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9425 if ( rcStrict == VINF_SUCCESS
9426 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9427 || rcStrict == VERR_PAGE_NOT_PRESENT)
9428 {
9429 /* Successfully synced our nested page tables. */
9430 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9431 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9432 return VINF_SUCCESS;
9433 }
9434 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9435 return rcStrict;
9436
9437#else /* IN_NEM_DARWIN */
9438 PVM pVM = pVCpu->CTX_SUFF(pVM);
9439 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9440 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9441 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9442 vmxHCImportGuestRip(pVCpu);
9443 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9444
9445 /*
9446 * Ask PGM for information about the given GCPhys. We need to check if we're
9447 * out of sync first.
9448 */
9449 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9450 false,
9451 false };
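    /* The first field is the write-access flag; the remaining flags start out clear and are filled in by the
       page-checker callback below. */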
9452 PGMPHYSNEMPAGEINFO Info;
9453 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9454 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9455 if (RT_SUCCESS(rc))
9456 {
9457 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9458 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9459 {
9460 if (State.fCanResume)
9461 {
9462 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9463 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9464 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9465 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9466 State.fDidSomething ? "" : " no-change"));
9467 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9468 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9469 return VINF_SUCCESS;
9470 }
9471 }
9472
9473 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9474 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9475 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9476 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9477 State.fDidSomething ? "" : " no-change"));
9478 }
9479 else
9480 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9481 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9482 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9483
9484 /*
9485 * Emulate the memory access, either access handler or special memory.
9486 */
9487 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9488 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9489 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9490 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9491 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9492
9493 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9494 AssertRCReturn(rc, rc);
9495
9496 VBOXSTRICTRC rcStrict;
9497 if (!pExitRec)
9498 rcStrict = IEMExecOne(pVCpu);
9499 else
9500 {
9501 /* Frequent access or probing. */
9502 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9503 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9504 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9505 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9506 }
9507
9508 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9509
9510 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9511 return rcStrict;
9512#endif /* IN_NEM_DARWIN */
9513}
9514
9515#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9516
9517/**
9518 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9519 */
9520HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9521{
9522 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
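    /* Like the other VMX-instruction exits below: read the exit qualification/instruction info, import just
       the state IEM needs, decode the memory operand and let IEM emulate the instruction. */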
9523
9524 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9525 | HMVMX_READ_EXIT_INSTR_INFO
9526 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9527 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9528 | CPUMCTX_EXTRN_SREG_MASK
9529 | CPUMCTX_EXTRN_HWVIRT
9530 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9531 AssertRCReturn(rc, rc);
9532
9533 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9534
9535 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9536 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9537
9538 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9539 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9540 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9541 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9542 {
9543 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9544 rcStrict = VINF_SUCCESS;
9545 }
9546 return rcStrict;
9547}
9548
9549
9550/**
9551 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9552 */
9553HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9554{
9555 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9556
9557 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9558 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9559 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9560 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9561 AssertRCReturn(rc, rc);
9562
9563 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9564
9565 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9566 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9567 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9568 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9569 {
9570 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9571 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9572 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9573 }
9574 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9575 return rcStrict;
9576}
9577
9578
9579/**
9580 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9581 */
9582HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9583{
9584 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9585
9586 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9587 | HMVMX_READ_EXIT_INSTR_INFO
9588 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9589 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9590 | CPUMCTX_EXTRN_SREG_MASK
9591 | CPUMCTX_EXTRN_HWVIRT
9592 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9593 AssertRCReturn(rc, rc);
9594
9595 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9596
9597 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9598 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9599
9600 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9601 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9602 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9603 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9604 {
9605 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9606 rcStrict = VINF_SUCCESS;
9607 }
9608 return rcStrict;
9609}
9610
9611
9612/**
9613 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9614 */
9615HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9616{
9617 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9618
9619 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9620 | HMVMX_READ_EXIT_INSTR_INFO
9621 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9622 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9623 | CPUMCTX_EXTRN_SREG_MASK
9624 | CPUMCTX_EXTRN_HWVIRT
9625 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9626 AssertRCReturn(rc, rc);
9627
9628 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9629
9630 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9631 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9632
9633 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9634 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9635 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9636 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9637 {
9638 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9639 rcStrict = VINF_SUCCESS;
9640 }
9641 return rcStrict;
9642}
9643
9644
9645/**
9646 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9647 */
9648HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9649{
9650 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9651
9652 /*
9653 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9654     * thus might not need to import the shadow VMCS state, but it's safer just in case
9655 * code elsewhere dares look at unsynced VMCS fields.
9656 */
9657 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9658 | HMVMX_READ_EXIT_INSTR_INFO
9659 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9660 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9661 | CPUMCTX_EXTRN_SREG_MASK
9662 | CPUMCTX_EXTRN_HWVIRT
9663 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9664 AssertRCReturn(rc, rc);
9665
9666 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9667
9668 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9669 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9670 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9671
9672 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9673 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9674 {
9675 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9676
9677# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9678 /* Try for exit optimization. This is on the following instruction
9679 because it would be a waste of time to have to reinterpret the
9680            already decoded vmread instruction. */
9681 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9682 if (pExitRec)
9683 {
9684 /* Frequent access or probing. */
9685 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9686 AssertRCReturn(rc, rc);
9687
9688 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9689 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9690 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9691 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9692 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9693 }
9694# endif
9695 }
9696 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9697 {
9698 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9699 rcStrict = VINF_SUCCESS;
9700 }
9701 return rcStrict;
9702}
9703
9704
9705/**
9706 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9707 */
9708HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9709{
9710 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9711
9712 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9713 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9714 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9715 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9716 AssertRCReturn(rc, rc);
9717
9718 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9719
9720 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9721 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9722 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9723 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9724 {
9725 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9726 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9727 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9728 }
9729 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9730 return rcStrict;
9731}
9732
9733
9734/**
9735 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9736 */
9737HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9738{
9739 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9740
9741 /*
9742 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9743 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9744 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9745 */
9746 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9747 | HMVMX_READ_EXIT_INSTR_INFO
9748 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9749 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9750 | CPUMCTX_EXTRN_SREG_MASK
9751 | CPUMCTX_EXTRN_HWVIRT
9752 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9753 AssertRCReturn(rc, rc);
9754
9755 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9756
9757 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9758 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9759 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9760
9761 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9762 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9763 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9764 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9765 {
9766 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9767 rcStrict = VINF_SUCCESS;
9768 }
9769 return rcStrict;
9770}
9771
9772
9773/**
9774 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9775 */
9776HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9777{
9778 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9779
9780 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9781 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9782 | CPUMCTX_EXTRN_HWVIRT
9783 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9784 AssertRCReturn(rc, rc);
9785
9786 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9787
9788 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9789 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9790 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9791 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9792 {
9793 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9794 rcStrict = VINF_SUCCESS;
9795 }
9796 return rcStrict;
9797}
9798
9799
9800/**
9801 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9802 */
9803HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9804{
9805 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9806
9807 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9808 | HMVMX_READ_EXIT_INSTR_INFO
9809 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9810 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9811 | CPUMCTX_EXTRN_SREG_MASK
9812 | CPUMCTX_EXTRN_HWVIRT
9813 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9814 AssertRCReturn(rc, rc);
9815
9816 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9817
9818 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9819 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9820
9821 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9822 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9823 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9824 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9825 {
9826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9827 rcStrict = VINF_SUCCESS;
9828 }
9829 return rcStrict;
9830}
9831
9832
9833/**
9834 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9835 */
9836HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9837{
9838 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9839
9840 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9841 | HMVMX_READ_EXIT_INSTR_INFO
9842 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9843 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9844 | CPUMCTX_EXTRN_SREG_MASK
9845 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9846 AssertRCReturn(rc, rc);
9847
9848 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9849
9850 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9851 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9852
9853 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9854 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9855 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9856 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9857 {
9858 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9859 rcStrict = VINF_SUCCESS;
9860 }
9861 return rcStrict;
9862}
9863
9864
9865# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9866/**
9867 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9868 */
9869HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9870{
9871 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9872
9873 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9874 | HMVMX_READ_EXIT_INSTR_INFO
9875 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9876 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9877 | CPUMCTX_EXTRN_SREG_MASK
9878 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9879 AssertRCReturn(rc, rc);
9880
9881 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9882
9883 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9884 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9885
9886 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9888 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9889 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9890 {
9891 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9892 rcStrict = VINF_SUCCESS;
9893 }
9894 return rcStrict;
9895}
9896# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9897#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9898/** @} */
9899
9900
9901#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9902/** @name Nested-guest VM-exit handlers.
9903 * @{
9904 */
9905/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9906/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9907/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9908
9909/**
9910 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9911 * Conditional VM-exit.
9912 */
9913HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9914{
9915 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9916
9917 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9918
9919 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9920 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9921 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9922
9923 switch (uExitIntType)
9924 {
9925# ifndef IN_NEM_DARWIN
9926 /*
9927 * Physical NMIs:
9928 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9929 */
9930 case VMX_EXIT_INT_INFO_TYPE_NMI:
9931 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9932# endif
9933
9934 /*
9935 * Hardware exceptions,
9936 * Software exceptions,
9937 * Privileged software exceptions:
9938 * Figure out if the exception must be delivered to the guest or the nested-guest.
9939 */
9940 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9941 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9942 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9943 {
9944 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9945 | HMVMX_READ_EXIT_INSTR_LEN
9946 | HMVMX_READ_IDT_VECTORING_INFO
9947 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9948
9949 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9950 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9951 {
9952 /* Exit qualification is required for debug and page-fault exceptions. */
9953 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9954
9955 /*
9956 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9957 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9958 * length. However, if delivery of a software interrupt, software exception or privileged
9959 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9960 */
9961 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9962 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9963 pVmxTransient->uExitIntErrorCode,
9964 pVmxTransient->uIdtVectoringInfo,
9965 pVmxTransient->uIdtVectoringErrorCode);
9966#ifdef DEBUG_ramshankar
9967 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9968 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9969 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9970 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9971 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9972 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9973#endif
9974 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9975 }
9976
9977 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9978 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9979 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9980 }
9981
9982 /*
9983 * Software interrupts:
9984 * VM-exits cannot be caused by software interrupts.
9985 *
9986 * External interrupts:
9987 * This should only happen when "acknowledge external interrupts on VM-exit"
9988 * control is set. However, we never set this when executing a guest or
9989 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9990 * the guest.
9991 */
9992 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9993 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9994 default:
9995 {
9996 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9997 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9998 }
9999 }
10000}
10001
10002
10003/**
10004 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10005 * Unconditional VM-exit.
10006 */
10007HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10008{
10009 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10010 return IEMExecVmxVmexitTripleFault(pVCpu);
10011}
10012
10013
10014/**
10015 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10016 */
10017HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10018{
10019 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10020
10021 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10022 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10023 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10024}
10025
10026
10027/**
10028 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10029 */
10030HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10031{
10032 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10033
10034 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10035 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10036 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10037}
10038
10039
10040/**
10041 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10042 * Unconditional VM-exit.
10043 */
10044HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10045{
10046 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10047
10048 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10049 | HMVMX_READ_EXIT_INSTR_LEN
10050 | HMVMX_READ_IDT_VECTORING_INFO
10051 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10052
10053 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10054 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10055 pVmxTransient->uIdtVectoringErrorCode);
10056 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10057}
10058
10059
10060/**
10061 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10062 */
10063HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10064{
10065 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10066
10067 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10068 {
10069 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10070 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10071 }
10072 return vmxHCExitHlt(pVCpu, pVmxTransient);
10073}
10074
10075
10076/**
10077 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10078 */
10079HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10080{
10081 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10082
10083 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10084 {
10085 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10086 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10087 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10088 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10089 }
10090 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10091}
10092
10093
10094/**
10095 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10096 */
10097HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10098{
10099 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10100
10101 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10102 {
10103 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10104 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10105 }
10106 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10107}
10108
10109
10110/**
10111 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10112 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10113 */
10114HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10115{
10116 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10117
10118 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10119 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10120
10121 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10122
10123 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10124 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10125 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10126
10127 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
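    /* The second register operand holds the VMCS field encoding; when the nested-guest is not
       in long mode only the lower 32 bits of it are significant, so mask accordingly. */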
10128 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10129 u64VmcsField &= UINT64_C(0xffffffff);
10130
10131 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10132 {
10133 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10134 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10135 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10136 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10137 }
10138
10139 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10140 return vmxHCExitVmread(pVCpu, pVmxTransient);
10141 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10142}
10143
10144
10145/**
10146 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10147 */
10148HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10149{
10150 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10151
10152 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10153 {
10154 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10155 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10156 }
10157
10158 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10159}
10160
10161
10162/**
10163 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10164 * Conditional VM-exit.
10165 */
10166HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10167{
10168 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10169
10170 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10171 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10172
10173 VBOXSTRICTRC rcStrict;
10174 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10175 switch (uAccessType)
10176 {
10177 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10178 {
10179 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10180 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10181 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10182 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10183
10184 bool fIntercept;
10185 switch (iCrReg)
10186 {
10187 case 0:
10188 case 4:
10189 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10190 break;
10191
10192 case 3:
10193 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10194 break;
10195
10196 case 8:
10197 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10198 break;
10199
10200 default:
10201 fIntercept = false;
10202 break;
10203 }
10204 if (fIntercept)
10205 {
10206 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10207 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10208 }
10209 else
10210 {
10211 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10212 AssertRCReturn(rc, rc);
10213 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10214 }
10215 break;
10216 }
10217
10218 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10219 {
10220 /*
10221 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10222 * CR2 reads do not cause a VM-exit.
10223 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10224 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10225 */
10226 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10227 if ( iCrReg == 3
10228 || iCrReg == 8)
10229 {
10230 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10231 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10232 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10233 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10234 {
10235 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10236 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10237 }
10238 else
10239 {
10240 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10241 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10242 }
10243 }
10244 else
10245 {
10246 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10247 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10248 }
10249 break;
10250 }
10251
10252 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10253 {
10254 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10255 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10256 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10257 if ( (uGstHostMask & X86_CR0_TS)
10258 && (uReadShadow & X86_CR0_TS))
10259 {
10260 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10261 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10262 }
10263 else
10264 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10265 break;
10266 }
10267
10268 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10269 {
10270 RTGCPTR GCPtrEffDst;
10271 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10272 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10273 if (fMemOperand)
10274 {
10275 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10276 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10277 }
10278 else
10279 GCPtrEffDst = NIL_RTGCPTR;
10280
10281 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10282 {
10283 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10284 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10285 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10286 }
10287 else
10288 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10289 break;
10290 }
10291
10292 default:
10293 {
10294 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10295 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10296 }
10297 }
10298
10299 if (rcStrict == VINF_IEM_RAISED_XCPT)
10300 {
10301 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10302 rcStrict = VINF_SUCCESS;
10303 }
10304 return rcStrict;
10305}
10306
10307
10308/**
10309 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10310 * Conditional VM-exit.
10311 */
10312HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10313{
10314 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10315
10316 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10317 {
10318 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10319 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10320 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10321 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10322 }
10323 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10324}
10325
10326
10327/**
10328 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10329 * Conditional VM-exit.
10330 */
10331HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10332{
10333 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10334
10335 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10336
10337 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10338 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10339 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
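    /* The size-of-access field in the exit qualification encodes 0 = 1 byte, 1 = 2 bytes and
       3 = 4 bytes; 2 is not a valid encoding, hence the assertion above and the 0 entry in the
       size table below. */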
10340
10341 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10342 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10343 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10344 {
10345 /*
10346 * IN/OUT instruction:
10347 * - Provides VM-exit instruction length.
10348 *
10349 * INS/OUTS instruction:
10350 * - Provides VM-exit instruction length.
10351 * - Provides Guest-linear address.
10352 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10353 */
10354 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10355 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10356
10357 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10358 pVmxTransient->ExitInstrInfo.u = 0;
10359 pVmxTransient->uGuestLinearAddr = 0;
10360
10361 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10362 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10363 if (fIOString)
10364 {
10365 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10366 if (fVmxInsOutsInfo)
10367 {
10368 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10369 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10370 }
10371 }
10372
10373 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10374 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10375 }
10376 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10377}
10378
10379
10380/**
10381 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10382 */
10383HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10384{
10385 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10386
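    /* If the nested hypervisor does not use MSR bitmaps, every RDMSR causes a VM-exit to it;
       otherwise consult its MSR bitmap for the read permission of the MSR in ECX. */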
10387 uint32_t fMsrpm;
10388 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10389 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10390 else
10391 fMsrpm = VMXMSRPM_EXIT_RD;
10392
10393 if (fMsrpm & VMXMSRPM_EXIT_RD)
10394 {
10395 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10396 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10397 }
10398 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10399}
10400
10401
10402/**
10403 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10404 */
10405HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10406{
10407 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10408
10409 uint32_t fMsrpm;
10410 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10411 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10412 else
10413 fMsrpm = VMXMSRPM_EXIT_WR;
10414
10415 if (fMsrpm & VMXMSRPM_EXIT_WR)
10416 {
10417 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10418 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10419 }
10420 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10421}
10422
10423
10424/**
10425 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10426 */
10427HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10428{
10429 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10430
10431 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10432 {
10433 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10434 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10435 }
10436 return vmxHCExitMwait(pVCpu, pVmxTransient);
10437}
10438
10439
10440/**
10441 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10442 * VM-exit.
10443 */
10444HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10445{
10446 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10447
10448 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10449 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10450 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10451 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10452}
10453
10454
10455/**
10456 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10457 */
10458HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10459{
10460 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10461
10462 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10463 {
10464 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10465 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10466 }
10467 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10468}
10469
10470
10471/**
10472 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10473 */
10474HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10475{
10476 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10477
10478 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10479 * PAUSE when executing a nested-guest? If it does not, we would not need
10480 * to check for the intercepts here. Just call VM-exit... */
10481
10482 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10483 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10484 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10485 {
10486 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10487 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10488 }
10489 return vmxHCExitPause(pVCpu, pVmxTransient);
10490}
10491
10492
10493/**
10494 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10495 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10496 */
10497HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10498{
10499 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10500
10501 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10502 {
10503 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10504 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10505 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10506 }
10507 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10508}
10509
10510
10511/**
10512 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10513 * VM-exit.
10514 */
10515HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10516{
10517 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10518
10519 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10520 | HMVMX_READ_EXIT_INSTR_LEN
10521 | HMVMX_READ_IDT_VECTORING_INFO
10522 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10523
10524 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10525
10526 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10527 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10528
10529 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10530 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10531 pVmxTransient->uIdtVectoringErrorCode);
10532 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10533}
10534
10535
10536/**
10537 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10538 * Conditional VM-exit.
10539 */
10540HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10541{
10542 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10543
10544 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10545 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10546 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10547}
10548
10549
10550/**
10551 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10552 * Conditional VM-exit.
10553 */
10554HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10555{
10556 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10557
10558 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10559 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10560 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10561}
10562
10563
10564/**
10565 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10566 */
10567HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10568{
10569 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10570
10571 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10572 {
10573 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10574 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10575 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10576 }
10577 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10578}
10579
10580
10581/**
10582 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10583 */
10584HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10585{
10586 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10587
10588 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10589 {
10590 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10591 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10592 }
10593 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10594}
10595
10596
10597/**
10598 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10599 */
10600HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10601{
10602 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10603
10604 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10605 {
10606 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10607 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10608 | HMVMX_READ_EXIT_INSTR_INFO
10609 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10610 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10611 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10612 }
10613 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10614}
10615
10616
10617/**
10618 * Nested-guest VM-exit handler for invalid-guest state
10619 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10620 */
10621HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10622{
10623 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10624
10625 /*
10626 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10627 * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10628 * Handle it as if the outer guest were in an invalid guest state.
10629 *
10630 * When the fast path is implemented, this should be changed to cause the corresponding
10631 * nested-guest VM-exit.
10632 */
10633 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10634}
10635
10636
10637/**
10638 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10639 * and only provide the instruction length.
10640 *
10641 * Unconditional VM-exit.
10642 */
10643HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10644{
10645 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10646
10647#ifdef VBOX_STRICT
10648 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10649 switch (pVmxTransient->uExitReason)
10650 {
10651 case VMX_EXIT_ENCLS:
10652 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10653 break;
10654
10655 case VMX_EXIT_VMFUNC:
10656 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10657 break;
10658 }
10659#endif
10660
10661 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10662 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10663}
10664
10665
10666/**
10667 * Nested-guest VM-exit handler for instructions that provide instruction length as
10668 * well as more information.
10669 *
10670 * Unconditional VM-exit.
10671 */
10672HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10673{
10674 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10675
10676# ifdef VBOX_STRICT
10677 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10678 switch (pVmxTransient->uExitReason)
10679 {
10680 case VMX_EXIT_GDTR_IDTR_ACCESS:
10681 case VMX_EXIT_LDTR_TR_ACCESS:
10682 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10683 break;
10684
10685 case VMX_EXIT_RDRAND:
10686 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10687 break;
10688
10689 case VMX_EXIT_RDSEED:
10690 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10691 break;
10692
10693 case VMX_EXIT_XSAVES:
10694 case VMX_EXIT_XRSTORS:
10695 /** @todo NSTVMX: Verify XSS-bitmap. */
10696 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10697 break;
10698
10699 case VMX_EXIT_UMWAIT:
10700 case VMX_EXIT_TPAUSE:
10701 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10702 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10703 break;
10704
10705 case VMX_EXIT_LOADIWKEY:
10706 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10707 break;
10708 }
10709# endif
10710
10711 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10712 | HMVMX_READ_EXIT_INSTR_LEN
10713 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10714 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10715 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10716}
10717
10718# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10719
10720/**
10721 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10722 * Conditional VM-exit.
10723 */
10724HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10725{
10726 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10727 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10728
10729 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10730 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10731 {
10732 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10733 | HMVMX_READ_EXIT_INSTR_LEN
10734 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10735 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10736 | HMVMX_READ_IDT_VECTORING_INFO
10737 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10738 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10739 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10740 AssertRCReturn(rc, rc);
10741
10742 /*
10743 * If the VM-exit is ours to handle, we're responsible for re-injecting any event whose
10744 * delivery might have triggered it. If we forward the problem to the inner VMM,
10745 * it becomes the inner VMM's problem to deal with and we'll clear the recovered event.
10746 */
10747 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10748 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10749 { /*likely*/ }
10750 else
10751 {
10752 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10753 return rcStrict;
10754 }
10755 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10756
10757 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10758 uint64_t const uExitQual = pVmxTransient->uExitQual;
10759
10760 RTGCPTR GCPtrNestedFault;
10761 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10762 if (fIsLinearAddrValid)
10763 {
10764 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10765 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10766 }
10767 else
10768 GCPtrNestedFault = 0;
10769
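    /* Synthesize a #PF-style error code from the EPT violation exit qualification: instruction
       fetches map to the I/D bit, write accesses to the R/W bit, and any of the EPT read, write
       or execute permission bits being set means the translation was present (the P bit). */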
10770 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10771 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10772 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10773 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10774 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10775
10776 PGMPTWALK Walk;
10777 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10778 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10779 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10780 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10781 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10782 if (RT_SUCCESS(rcStrict))
10783 return rcStrict;
10784
10785 if (fClearEventOnForward)
10786 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10787
10788 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10789 pVmxTransient->uIdtVectoringErrorCode);
10790 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10791 {
10792 VMXVEXITINFO const ExitInfo
10793 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10794 pVmxTransient->uExitQual,
10795 pVmxTransient->cbExitInstr,
10796 pVmxTransient->uGuestLinearAddr,
10797 pVmxTransient->uGuestPhysicalAddr);
10798 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10799 }
10800
10801 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10802 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10803 }
10804
10805 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10806}
10807
10808
10809/**
10810 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10811 * Conditional VM-exit.
10812 */
10813HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10814{
10815 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10816 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10817
10818 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10819 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10820 {
10821 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10822 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10823 AssertRCReturn(rc, rc);
10824
10825 PGMPTWALK Walk;
10826 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10827 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10828 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10829 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10830 0 /* GCPtrNestedFault */, &Walk);
10831 if (RT_SUCCESS(rcStrict))
10832 {
10833 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10834 return rcStrict;
10835 }
10836
10837 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10838 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10839 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10840
10841 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10842 pVmxTransient->uIdtVectoringErrorCode);
10843 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10844 }
10845
10846 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10847}
10848
10849# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10850
10851/** @} */
10852#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10853
10854
10855/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10856 * probes.
10857 *
10858 * The following few functions and the associated structure contain the bloat
10859 * necessary for providing detailed debug events and DTrace probes as well as
10860 * reliable host-side single stepping. This works on the principle of
10861 * "subclassing" the normal execution loop and workers. We replace the loop
10862 * method completely and override selected helpers to add necessary adjustments
10863 * to their core operation.
10864 *
10865 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10866 * any performance for debug and analysis features.
10867 *
10868 * @{
10869 */
10870
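/* The structure below records the initial VMCS execution controls and exception bitmap so that
   vmxHCRunDebugStateRevert can restore them when the debug loop is left, while the fCpeXxx and
   bmXcptExtra members accumulate whatever DBGF and DTrace currently require. */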
10871/**
10872 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10873 * across the debug run loop.
10874 */
10875typedef struct VMXRUNDBGSTATE
10876{
10877 /** The RIP we started executing at. This is for detecting that we stepped. */
10878 uint64_t uRipStart;
10879 /** The CS we started executing with. */
10880 uint16_t uCsStart;
10881
10882 /** Whether we've actually modified the 1st execution control field. */
10883 bool fModifiedProcCtls : 1;
10884 /** Whether we've actually modified the 2nd execution control field. */
10885 bool fModifiedProcCtls2 : 1;
10886 /** Whether we've actually modified the exception bitmap. */
10887 bool fModifiedXcptBitmap : 1;
10888
10889 /** Whether we desire the CR0 mask to be cleared. */
10890 bool fClearCr0Mask : 1;
10891 /** Whether we desire the CR4 mask to be cleared. */
10892 bool fClearCr4Mask : 1;
10893 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10894 uint32_t fCpe1Extra;
10895 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10896 uint32_t fCpe1Unwanted;
10897 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10898 uint32_t fCpe2Extra;
10899 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10900 uint32_t bmXcptExtra;
10901 /** The sequence number of the DTrace provider settings that the state was
10902 * configured against. */
10903 uint32_t uDtraceSettingsSeqNo;
10904 /** VM-exits to check (one bit per VM-exit). */
10905 uint32_t bmExitsToCheck[3];
10906
10907 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10908 uint32_t fProcCtlsInitial;
10909 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10910 uint32_t fProcCtls2Initial;
10911 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10912 uint32_t bmXcptInitial;
10913} VMXRUNDBGSTATE;
10914AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10915typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10916
10917
10918/**
10919 * Initializes the VMXRUNDBGSTATE structure.
10920 *
10921 * @param pVCpu The cross context virtual CPU structure of the
10922 * calling EMT.
10923 * @param pVmxTransient The VMX-transient structure.
10924 * @param pDbgState The debug state to initialize.
10925 */
10926static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10927{
10928 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10929 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10930
10931 pDbgState->fModifiedProcCtls = false;
10932 pDbgState->fModifiedProcCtls2 = false;
10933 pDbgState->fModifiedXcptBitmap = false;
10934 pDbgState->fClearCr0Mask = false;
10935 pDbgState->fClearCr4Mask = false;
10936 pDbgState->fCpe1Extra = 0;
10937 pDbgState->fCpe1Unwanted = 0;
10938 pDbgState->fCpe2Extra = 0;
10939 pDbgState->bmXcptExtra = 0;
10940 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10941 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10942 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10943}
10944
10945
10946/**
10947 * Updates the VMCS fields with changes requested by @a pDbgState.
10948 *
10949 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as immediately
10950 * before executing guest code, i.e. when interrupts are disabled.
10951 * We don't check status codes here as we cannot easily assert or return in the
10952 * latter case.
10953 *
10954 * @param pVCpu The cross context virtual CPU structure.
10955 * @param pVmxTransient The VMX-transient structure.
10956 * @param pDbgState The debug state.
10957 */
10958static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10959{
10960 /*
10961 * Ensure desired flags in VMCS control fields are set.
10962 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10963 *
10964 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10965 * there should be no stale data in pCtx at this point.
10966 */
10967 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10968 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10969 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10970 {
10971 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10972 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10973 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10974 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10975 pDbgState->fModifiedProcCtls = true;
10976 }
10977
10978 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10979 {
10980 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10981 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10982 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10983 pDbgState->fModifiedProcCtls2 = true;
10984 }
10985
10986 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10987 {
10988 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10989 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10990 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10991 pDbgState->fModifiedXcptBitmap = true;
10992 }
10993
10994 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10995 {
10996 pVmcsInfo->u64Cr0Mask = 0;
10997 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10998 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10999 }
11000
11001 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11002 {
11003 pVmcsInfo->u64Cr4Mask = 0;
11004 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11005 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11006 }
11007
11008 NOREF(pVCpu);
11009}
11010
11011
11012/**
11013 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11014 * re-entry next time around.
11015 *
11016 * @returns Strict VBox status code (i.e. informational status codes too).
11017 * @param pVCpu The cross context virtual CPU structure.
11018 * @param pVmxTransient The VMX-transient structure.
11019 * @param pDbgState The debug state.
11020 * @param rcStrict The return code from executing the guest using single
11021 * stepping.
11022 */
11023static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11024 VBOXSTRICTRC rcStrict)
11025{
11026 /*
11027 * Restore VM-exit control settings as we may not reenter this function the
11028 * next time around.
11029 */
11030 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11031
11032 /* We reload the initial value and trigger what recalculations we can the
11033 next time around. From the looks of things, that's all that's required at the moment. */
11034 if (pDbgState->fModifiedProcCtls)
11035 {
11036 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11037 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11038 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11039 AssertRC(rc2);
11040 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11041 }
11042
11043 /* We're currently the only ones messing with this one, so just restore the
11044 cached value and reload the field. */
11045 if ( pDbgState->fModifiedProcCtls2
11046 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11047 {
11048 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11049 AssertRC(rc2);
11050 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11051 }
11052
11053 /* If we've modified the exception bitmap, we restore it and trigger
11054 reloading and partial recalculation the next time around. */
11055 if (pDbgState->fModifiedXcptBitmap)
11056 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11057
11058 return rcStrict;
11059}
11060
11061
11062/**
11063 * Configures VM-exit controls for current DBGF and DTrace settings.
11064 *
11065 * This updates @a pDbgState and the VMCS execution control fields to reflect
11066 * the necessary VM-exits demanded by DBGF and DTrace.
11067 *
11068 * @param pVCpu The cross context virtual CPU structure.
11069 * @param pVmxTransient The VMX-transient structure. May update
11070 * fUpdatedTscOffsettingAndPreemptTimer.
11071 * @param pDbgState The debug state.
11072 */
11073static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11074{
11075#ifndef IN_NEM_DARWIN
11076 /*
11077 * Take down the DTrace settings sequence number so we can spot changes.
11078 */
11079 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11080 ASMCompilerBarrier();
11081#endif
11082
11083 /*
11084 * We'll rebuild most of the middle block of data members (holding the
11085 * current settings) as we go along here, so start by clearing it all.
11086 */
11087 pDbgState->bmXcptExtra = 0;
11088 pDbgState->fCpe1Extra = 0;
11089 pDbgState->fCpe1Unwanted = 0;
11090 pDbgState->fCpe2Extra = 0;
11091 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11092 pDbgState->bmExitsToCheck[i] = 0;
11093
11094 /*
11095 * Software interrupts (INT XXh) - no idea how to trigger these...
11096 */
11097 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11098 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11099 || VBOXVMM_INT_SOFTWARE_ENABLED())
11100 {
11101 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11102 }
11103
11104 /*
11105 * INT3 breakpoints - triggered by #BP exceptions.
11106 */
11107 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11108 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11109
11110 /*
11111 * Exception bitmap and XCPT events+probes.
11112 */
11113 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11114 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11115 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11116
11117 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11118 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11119 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11120 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11121 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11122 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11123 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11124 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11125 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11126 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11127 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11128 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11129 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11130 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11131 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11132 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11133 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11134 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11135
11136 if (pDbgState->bmXcptExtra)
11137 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11138
11139 /*
11140 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11141 *
11142 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11143 * So, when adding/changing/removing please don't forget to update it.
11144 *
11145 * Some of the macros pick up local variables to save horizontal space
11146 * (being able to see it in a table is the lesser evil here).
11147 */
11148#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11149 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11150 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11151#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11152 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11153 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11154 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11155 } else do { } while (0)
11156#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11157 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11158 { \
11159 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11160 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11161 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11162 } else do { } while (0)
11163#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11164 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11165 { \
11166 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11167 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11168 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11169 } else do { } while (0)
11170#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11171 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11172 { \
11173 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11174 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11175 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11176 } else do { } while (0)
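/* For example, SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT) below
   forces HLT exiting on and marks VMX_EXIT_HLT for checking whenever either the DBGF INSTR_HALT
   event or the corresponding DTrace probe is enabled. */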
11177
11178 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11179 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11180 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11181 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11182 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11183
11184 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11185 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11186 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11187 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11188 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11189 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11190 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11191 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11192 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11193 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11194 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11195 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11196 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11197 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11198 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11199 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11200 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11201 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11202 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11203 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11204 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11205 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11206 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11207 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11208 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11209 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11210 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11211 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11212 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11213 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11214 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11215 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11216 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11217 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11218 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11219 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11220
11221 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11222 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11223 {
11224 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11225 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11226 AssertRC(rc);
11227
11228#if 0 /** @todo fix me */
11229 pDbgState->fClearCr0Mask = true;
11230 pDbgState->fClearCr4Mask = true;
11231#endif
11232 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11233 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11234 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11235 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11236 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11237 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11238 require clearing here and in the loop if we start using it. */
11239 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11240 }
11241 else
11242 {
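 /* Not monitoring CR accesses: if we had been clearing the CR0/CR4 masks,
    stop doing so and flag CR0/CR4 as changed so they get re-exported. */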
11243 if (pDbgState->fClearCr0Mask)
11244 {
11245 pDbgState->fClearCr0Mask = false;
11246 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11247 }
11248 if (pDbgState->fClearCr4Mask)
11249 {
11250 pDbgState->fClearCr4Mask = false;
11251 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11252 }
11253 }
11254 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11255 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11256
11257 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11258 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11259 {
11260 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11261 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11262 }
11263 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11264 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11265
11266 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11267 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11268 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11269 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11270 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11271 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11272 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11273 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11274#if 0 /** @todo too slow, fix handler. */
11275 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11276#endif
11277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11278
11279 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11280 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11281 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11282 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11283 {
11284 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11285 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11286 }
11287 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11289 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11291
11292 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11293 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11294 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11295 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11296 {
11297 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11298 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11299 }
11300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11301 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11302 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11304
11305 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11306 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11307 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11308 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11309 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11310 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11311 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11312 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11313 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11314 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11315 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11316 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11317 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11318 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11319 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11320 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11321 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11322 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11323 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11324 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
11325 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11326 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11327
11328#undef IS_EITHER_ENABLED
11329#undef SET_ONLY_XBM_IF_EITHER_EN
11330#undef SET_CPE1_XBM_IF_EITHER_EN
11331#undef SET_CPEU_XBM_IF_EITHER_EN
11332#undef SET_CPE2_XBM_IF_EITHER_EN
11333
11334 /*
11335 * Sanitize the controls against what the CPU actually supports.
11336 */
11337 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11338 if (pDbgState->fCpe2Extra)
11339 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11340 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11341 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
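 /* If the desired RDTSC-exit setting changed, remember it and force the TSC
    offsetting / preemption-timer setup to be re-evaluated. */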
11342#ifndef IN_NEM_DARWIN
11343 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11344 {
11345 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11346 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11347 }
11348#else
11349 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11350 {
11351 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11352 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11353 }
11354#endif
11355
11356 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11357 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11358 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11359 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11360}
11361
11362
11363/**
11364 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11365 * appropriate.
11366 *
11367 * The caller has checked the VM-exit against the
11368 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11369 * already, so we don't have to do that either.
11370 *
11371 * @returns Strict VBox status code (i.e. informational status codes too).
11372 * @param pVCpu The cross context virtual CPU structure.
11373 * @param pVmxTransient The VMX-transient structure.
11374 * @param uExitReason The VM-exit reason.
11375 *
11376 * @remarks The name of this function is displayed by dtrace, so keep it short
11377 * and to the point. No longer than 33 chars, please.
11378 */
11379static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11380{
11381 /*
11382 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11383 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11384 *
11385 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11386 * does. Must add/change/remove in both places. Same ordering, please.
11387 *
11388 * Added/removed events must also be reflected in the next section
11389 * where we dispatch dtrace events.
11390 */
11391 bool fDtrace1 = false;
11392 bool fDtrace2 = false;
11393 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11394 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11395 uint32_t uEventArg = 0;
11396#define SET_EXIT(a_EventSubName) \
11397 do { \
11398 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11399 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11400 } while (0)
11401#define SET_BOTH(a_EventSubName) \
11402 do { \
11403 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11404 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11405 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11406 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11407 } while (0)
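 /* Translate the exit reason into instruction- and/or exit-level DBGF events
    and note whether the corresponding dtrace probes are armed. */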
11408 switch (uExitReason)
11409 {
11410 case VMX_EXIT_MTF:
11411 return vmxHCExitMtf(pVCpu, pVmxTransient);
11412
11413 case VMX_EXIT_XCPT_OR_NMI:
11414 {
11415 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11416 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11417 {
11418 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11419 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11420 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11421 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11422 {
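 /* Hardware/software exceptions map directly onto the DBGF exception events;
    pick up the error code when the exit provides one. */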
11423 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11424 {
11425 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11426 uEventArg = pVmxTransient->uExitIntErrorCode;
11427 }
11428 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11429 switch (enmEvent1)
11430 {
11431 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11432 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11433 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11434 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11435 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11436 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11437 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11438 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11439 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11440 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11441 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11442 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11443 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11444 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11445 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11446 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11447 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11448 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11449 default: break;
11450 }
11451 }
11452 else
11453 AssertFailed();
11454 break;
11455
11456 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11457 uEventArg = idxVector;
11458 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11459 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11460 break;
11461 }
11462 break;
11463 }
11464
11465 case VMX_EXIT_TRIPLE_FAULT:
11466 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11467 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11468 break;
11469 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11470 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11471 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11472 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11473 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11474
11475 /* Instruction specific VM-exits: */
11476 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11477 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11478 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11479 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11480 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11481 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11482 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11483 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11484 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11485 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11486 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11487 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11488 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11489 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11490 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11491 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11492 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11493 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11494 case VMX_EXIT_MOV_CRX:
11495 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11496 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11497 SET_BOTH(CRX_READ);
11498 else
11499 SET_BOTH(CRX_WRITE);
11500 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11501 break;
11502 case VMX_EXIT_MOV_DRX:
11503 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11504 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11505 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11506 SET_BOTH(DRX_READ);
11507 else
11508 SET_BOTH(DRX_WRITE);
11509 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11510 break;
11511 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11512 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11513 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11514 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11515 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11516 case VMX_EXIT_GDTR_IDTR_ACCESS:
11517 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11518 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11519 {
11520 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11521 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11522 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11523 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11524 }
11525 break;
11526
11527 case VMX_EXIT_LDTR_TR_ACCESS:
11528 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11529 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11530 {
11531 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11532 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11533 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11534 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11535 }
11536 break;
11537
11538 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11539 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11540 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11541 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11542 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11543 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11544 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11545 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11546 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11547 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11548 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11549
11550 /* Events that aren't relevant at this point. */
11551 case VMX_EXIT_EXT_INT:
11552 case VMX_EXIT_INT_WINDOW:
11553 case VMX_EXIT_NMI_WINDOW:
11554 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11555 case VMX_EXIT_PREEMPT_TIMER:
11556 case VMX_EXIT_IO_INSTR:
11557 break;
11558
11559 /* Errors and unexpected events. */
11560 case VMX_EXIT_INIT_SIGNAL:
11561 case VMX_EXIT_SIPI:
11562 case VMX_EXIT_IO_SMI:
11563 case VMX_EXIT_SMI:
11564 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11565 case VMX_EXIT_ERR_MSR_LOAD:
11566 case VMX_EXIT_ERR_MACHINE_CHECK:
11567 case VMX_EXIT_PML_FULL:
11568 case VMX_EXIT_VIRTUALIZED_EOI:
11569 break;
11570
11571 default:
11572 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11573 break;
11574 }
11575#undef SET_BOTH
11576#undef SET_EXIT
11577
11578 /*
11579 * Dtrace tracepoints go first. We do them all here so we don't have to
11580 * duplicate the guest-state saving a few dozen times. The downside is
11581 * that we've got to repeat the switch, though this time
11582 * we use enmEvent since the probes are a subset of what DBGF does.
11583 */
11584 if (fDtrace1 || fDtrace2)
11585 {
11586 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11587 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11588 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11589 switch (enmEvent1)
11590 {
11591 /** @todo consider which extra parameters would be helpful for each probe. */
11592 case DBGFEVENT_END: break;
11593 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11594 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11595 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11596 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11597 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11598 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11599 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11600 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11601 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11602 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11603 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11604 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11605 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11606 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11607 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11608 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11609 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11610 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11611 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11612 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11613 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11614 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11615 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11616 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11617 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11618 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11619 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11620 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11621 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11622 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11623 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11624 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11625 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11626 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11627 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11628 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11629 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11630 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11631 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11632 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11633 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11634 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11635 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11636 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11637 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11638 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11639 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11640 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11641 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11642 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11643 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11644 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11645 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11646 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11647 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11648 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11649 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11650 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11651 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11652 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11653 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11654 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11659 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11660 }
11661 switch (enmEvent2)
11662 {
11663 /** @todo consider which extra parameters would be helpful for each probe. */
11664 case DBGFEVENT_END: break;
11665 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11666 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11667 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11668 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11669 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11670 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11671 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11672 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11673 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11674 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11675 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11676 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11677 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11678 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11679 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11680 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11681 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11682 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11683 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11684 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11685 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11686 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11687 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11688 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11689 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11690 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11691 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11692 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11693 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11694 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11695 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11696 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11697 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11698 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11699 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11700 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11701 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11702 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11703 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11704 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11705 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11706 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11707 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11708 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11715 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11716 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11717 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11718 }
11719 }
11720
11721 /*
11722 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11723 * the DBGF call will do a full check).
11724 *
11725 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11726 * Note! If we have two events, we prioritize the first, i.e. the instruction
11727 * one, in order to avoid event nesting.
11728 */
11729 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11730 if ( enmEvent1 != DBGFEVENT_END
11731 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11732 {
11733 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11734 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11735 if (rcStrict != VINF_SUCCESS)
11736 return rcStrict;
11737 }
11738 else if ( enmEvent2 != DBGFEVENT_END
11739 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11740 {
11741 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11742 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11743 if (rcStrict != VINF_SUCCESS)
11744 return rcStrict;
11745 }
11746
11747 return VINF_SUCCESS;
11748}
11749
11750
11751/**
11752 * Single-stepping VM-exit filtering.
11753 *
11754 * This preprocesses the VM-exits, deciding whether we've gotten far
11755 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11756 * handling is performed.
11757 *
11758 * @returns Strict VBox status code (i.e. informational status codes too).
11759 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11760 * @param pVmxTransient The VMX-transient structure.
11761 * @param pDbgState The debug state.
11762 */
11763DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11764{
11765 /*
11766 * Expensive (saves context) generic dtrace VM-exit probe.
11767 */
11768 uint32_t const uExitReason = pVmxTransient->uExitReason;
11769 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11770 { /* more likely */ }
11771 else
11772 {
11773 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11774 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11775 AssertRC(rc);
11776 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11777 }
11778
11779#ifndef IN_NEM_DARWIN
11780 /*
11781 * Check for host NMI, just to get that out of the way.
11782 */
11783 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11784 { /* normally likely */ }
11785 else
11786 {
11787 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11788 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11789 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11790 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11791 }
11792#endif
11793
11794 /*
11795 * Check for single stepping event if we're stepping.
11796 */
11797 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11798 {
11799 switch (uExitReason)
11800 {
11801 case VMX_EXIT_MTF:
11802 return vmxHCExitMtf(pVCpu, pVmxTransient);
11803
11804 /* Various events: */
11805 case VMX_EXIT_XCPT_OR_NMI:
11806 case VMX_EXIT_EXT_INT:
11807 case VMX_EXIT_TRIPLE_FAULT:
11808 case VMX_EXIT_INT_WINDOW:
11809 case VMX_EXIT_NMI_WINDOW:
11810 case VMX_EXIT_TASK_SWITCH:
11811 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11812 case VMX_EXIT_APIC_ACCESS:
11813 case VMX_EXIT_EPT_VIOLATION:
11814 case VMX_EXIT_EPT_MISCONFIG:
11815 case VMX_EXIT_PREEMPT_TIMER:
11816
11817 /* Instruction specific VM-exits: */
11818 case VMX_EXIT_CPUID:
11819 case VMX_EXIT_GETSEC:
11820 case VMX_EXIT_HLT:
11821 case VMX_EXIT_INVD:
11822 case VMX_EXIT_INVLPG:
11823 case VMX_EXIT_RDPMC:
11824 case VMX_EXIT_RDTSC:
11825 case VMX_EXIT_RSM:
11826 case VMX_EXIT_VMCALL:
11827 case VMX_EXIT_VMCLEAR:
11828 case VMX_EXIT_VMLAUNCH:
11829 case VMX_EXIT_VMPTRLD:
11830 case VMX_EXIT_VMPTRST:
11831 case VMX_EXIT_VMREAD:
11832 case VMX_EXIT_VMRESUME:
11833 case VMX_EXIT_VMWRITE:
11834 case VMX_EXIT_VMXOFF:
11835 case VMX_EXIT_VMXON:
11836 case VMX_EXIT_MOV_CRX:
11837 case VMX_EXIT_MOV_DRX:
11838 case VMX_EXIT_IO_INSTR:
11839 case VMX_EXIT_RDMSR:
11840 case VMX_EXIT_WRMSR:
11841 case VMX_EXIT_MWAIT:
11842 case VMX_EXIT_MONITOR:
11843 case VMX_EXIT_PAUSE:
11844 case VMX_EXIT_GDTR_IDTR_ACCESS:
11845 case VMX_EXIT_LDTR_TR_ACCESS:
11846 case VMX_EXIT_INVEPT:
11847 case VMX_EXIT_RDTSCP:
11848 case VMX_EXIT_INVVPID:
11849 case VMX_EXIT_WBINVD:
11850 case VMX_EXIT_XSETBV:
11851 case VMX_EXIT_RDRAND:
11852 case VMX_EXIT_INVPCID:
11853 case VMX_EXIT_VMFUNC:
11854 case VMX_EXIT_RDSEED:
11855 case VMX_EXIT_XSAVES:
11856 case VMX_EXIT_XRSTORS:
11857 {
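 /* Only consider the step complete once RIP or CS has moved away from where
    the single step started. */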
11858 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11859 AssertRCReturn(rc, rc);
11860 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11861 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11862 return VINF_EM_DBG_STEPPED;
11863 break;
11864 }
11865
11866 /* Errors and unexpected events: */
11867 case VMX_EXIT_INIT_SIGNAL:
11868 case VMX_EXIT_SIPI:
11869 case VMX_EXIT_IO_SMI:
11870 case VMX_EXIT_SMI:
11871 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11872 case VMX_EXIT_ERR_MSR_LOAD:
11873 case VMX_EXIT_ERR_MACHINE_CHECK:
11874 case VMX_EXIT_PML_FULL:
11875 case VMX_EXIT_VIRTUALIZED_EOI:
11876 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11877 break;
11878
11879 default:
11880 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11881 break;
11882 }
11883 }
11884
11885 /*
11886 * Check for debugger event breakpoints and dtrace probes.
11887 */
11888 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11889 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11890 {
11891 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11892 if (rcStrict != VINF_SUCCESS)
11893 return rcStrict;
11894 }
11895
11896 /*
11897 * Normal processing.
11898 */
11899#ifdef HMVMX_USE_FUNCTION_TABLE
11900 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11901#else
11902 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11903#endif
11904}
11905
11906/** @} */