VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@97206

Last change on this file was revision 97206, checked in by vboxsync, 2 years ago

VMM/EM,HMVMXR0: Replaced EMInterpretRdpmc with IEMExecDecodedRdpmc (not really tested as none of my guests use it).

1/* $Id: VMXAllTemplate.cpp.h 97206 2022-10-18 12:48:35Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
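/*
 * Illustrative sketch, not part of the original source: a VM-exit handler that
 * consumes transient fields would typically assert them like this, assuming the
 * fields were pre-read with vmxHCReadToTransient() (defined further below):
 *
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 *     uint64_t const uExitQual = pVmxTransient->uExitQual;   // now known to be valid
 */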
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP), which are always swapped
69 * and restored across the world-switch, and also registers like EFER and other
70 * MSRs which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
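/*
 * Illustrative sketch, not part of the original source: code about to consume
 * guest CR0/CR4 from the CPU context would typically guard itself like this,
 * asserting that those bits are not still marked external:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
 */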
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC part of the name is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330
331 /* 16-bit guest-state fields. */
332 VMX_VMCS16_GUEST_ES_SEL,
333 VMX_VMCS16_GUEST_CS_SEL,
334 VMX_VMCS16_GUEST_SS_SEL,
335 VMX_VMCS16_GUEST_DS_SEL,
336 VMX_VMCS16_GUEST_FS_SEL,
337 VMX_VMCS16_GUEST_GS_SEL,
338 VMX_VMCS16_GUEST_LDTR_SEL,
339 VMX_VMCS16_GUEST_TR_SEL,
340 VMX_VMCS16_GUEST_INTR_STATUS,
341 VMX_VMCS16_GUEST_PML_INDEX,
342
343 /* 16-bit host-state fields. */
344 VMX_VMCS16_HOST_ES_SEL,
345 VMX_VMCS16_HOST_CS_SEL,
346 VMX_VMCS16_HOST_SS_SEL,
347 VMX_VMCS16_HOST_DS_SEL,
348 VMX_VMCS16_HOST_FS_SEL,
349 VMX_VMCS16_HOST_GS_SEL,
350 VMX_VMCS16_HOST_TR_SEL,
351
352 /* 64-bit control fields. */
353 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
355 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
357 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
358 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
361 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
365 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
367 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
369 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
370 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
371 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
373 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
375 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
377 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
379 VMX_VMCS64_CTRL_EPTP_FULL,
380 VMX_VMCS64_CTRL_EPTP_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
389 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
390 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
391 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
395 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
397 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_SPPTP_FULL,
402 VMX_VMCS64_CTRL_SPPTP_HIGH,
403 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
405 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
406 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
407 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
409
410 /* 64-bit read-only data fields. */
411 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
413
414 /* 64-bit guest-state fields. */
415 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
417 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
418 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
419 VMX_VMCS64_GUEST_PAT_FULL,
420 VMX_VMCS64_GUEST_PAT_HIGH,
421 VMX_VMCS64_GUEST_EFER_FULL,
422 VMX_VMCS64_GUEST_EFER_HIGH,
423 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
425 VMX_VMCS64_GUEST_PDPTE0_FULL,
426 VMX_VMCS64_GUEST_PDPTE0_HIGH,
427 VMX_VMCS64_GUEST_PDPTE1_FULL,
428 VMX_VMCS64_GUEST_PDPTE1_HIGH,
429 VMX_VMCS64_GUEST_PDPTE2_FULL,
430 VMX_VMCS64_GUEST_PDPTE2_HIGH,
431 VMX_VMCS64_GUEST_PDPTE3_FULL,
432 VMX_VMCS64_GUEST_PDPTE3_HIGH,
433 VMX_VMCS64_GUEST_BNDCFGS_FULL,
434 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
435 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
436 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
437 VMX_VMCS64_GUEST_PKRS_FULL,
438 VMX_VMCS64_GUEST_PKRS_HIGH,
439
440 /* 64-bit host-state fields. */
441 VMX_VMCS64_HOST_PAT_FULL,
442 VMX_VMCS64_HOST_PAT_HIGH,
443 VMX_VMCS64_HOST_EFER_FULL,
444 VMX_VMCS64_HOST_EFER_HIGH,
445 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
447 VMX_VMCS64_HOST_PKRS_FULL,
448 VMX_VMCS64_HOST_PKRS_HIGH,
449
450 /* 32-bit control fields. */
451 VMX_VMCS32_CTRL_PIN_EXEC,
452 VMX_VMCS32_CTRL_PROC_EXEC,
453 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
454 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
456 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
457 VMX_VMCS32_CTRL_EXIT,
458 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
459 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY,
461 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
462 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
463 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
464 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
465 VMX_VMCS32_CTRL_TPR_THRESHOLD,
466 VMX_VMCS32_CTRL_PROC_EXEC2,
467 VMX_VMCS32_CTRL_PLE_GAP,
468 VMX_VMCS32_CTRL_PLE_WINDOW,
469
470 /* 32-bit read-only fields. */
471 VMX_VMCS32_RO_VM_INSTR_ERROR,
472 VMX_VMCS32_RO_EXIT_REASON,
473 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
475 VMX_VMCS32_RO_IDT_VECTORING_INFO,
476 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
477 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
478 VMX_VMCS32_RO_EXIT_INSTR_INFO,
479
480 /* 32-bit guest-state fields. */
481 VMX_VMCS32_GUEST_ES_LIMIT,
482 VMX_VMCS32_GUEST_CS_LIMIT,
483 VMX_VMCS32_GUEST_SS_LIMIT,
484 VMX_VMCS32_GUEST_DS_LIMIT,
485 VMX_VMCS32_GUEST_FS_LIMIT,
486 VMX_VMCS32_GUEST_GS_LIMIT,
487 VMX_VMCS32_GUEST_LDTR_LIMIT,
488 VMX_VMCS32_GUEST_TR_LIMIT,
489 VMX_VMCS32_GUEST_GDTR_LIMIT,
490 VMX_VMCS32_GUEST_IDTR_LIMIT,
491 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_INT_STATE,
500 VMX_VMCS32_GUEST_ACTIVITY_STATE,
501 VMX_VMCS32_GUEST_SMBASE,
502 VMX_VMCS32_GUEST_SYSENTER_CS,
503 VMX_VMCS32_PREEMPT_TIMER_VALUE,
504
505 /* 32-bit host-state fields. */
506 VMX_VMCS32_HOST_SYSENTER_CS,
507
508 /* Natural-width control fields. */
509 VMX_VMCS_CTRL_CR0_MASK,
510 VMX_VMCS_CTRL_CR4_MASK,
511 VMX_VMCS_CTRL_CR0_READ_SHADOW,
512 VMX_VMCS_CTRL_CR4_READ_SHADOW,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
517
518 /* Natural-width read-only data fields. */
519 VMX_VMCS_RO_EXIT_QUALIFICATION,
520 VMX_VMCS_RO_IO_RCX,
521 VMX_VMCS_RO_IO_RSI,
522 VMX_VMCS_RO_IO_RDI,
523 VMX_VMCS_RO_IO_RIP,
524 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
525
526 /* Natural-width guest-state fields. */
527 VMX_VMCS_GUEST_CR0,
528 VMX_VMCS_GUEST_CR3,
529 VMX_VMCS_GUEST_CR4,
530 VMX_VMCS_GUEST_ES_BASE,
531 VMX_VMCS_GUEST_CS_BASE,
532 VMX_VMCS_GUEST_SS_BASE,
533 VMX_VMCS_GUEST_DS_BASE,
534 VMX_VMCS_GUEST_FS_BASE,
535 VMX_VMCS_GUEST_GS_BASE,
536 VMX_VMCS_GUEST_LDTR_BASE,
537 VMX_VMCS_GUEST_TR_BASE,
538 VMX_VMCS_GUEST_GDTR_BASE,
539 VMX_VMCS_GUEST_IDTR_BASE,
540 VMX_VMCS_GUEST_DR7,
541 VMX_VMCS_GUEST_RSP,
542 VMX_VMCS_GUEST_RIP,
543 VMX_VMCS_GUEST_RFLAGS,
544 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
545 VMX_VMCS_GUEST_SYSENTER_ESP,
546 VMX_VMCS_GUEST_SYSENTER_EIP,
547 VMX_VMCS_GUEST_S_CET,
548 VMX_VMCS_GUEST_SSP,
549 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
550
551 /* Natural-width host-state fields */
552 VMX_VMCS_HOST_CR0,
553 VMX_VMCS_HOST_CR3,
554 VMX_VMCS_HOST_CR4,
555 VMX_VMCS_HOST_FS_BASE,
556 VMX_VMCS_HOST_GS_BASE,
557 VMX_VMCS_HOST_TR_BASE,
558 VMX_VMCS_HOST_GDTR_BASE,
559 VMX_VMCS_HOST_IDTR_BASE,
560 VMX_VMCS_HOST_SYSENTER_ESP,
561 VMX_VMCS_HOST_SYSENTER_EIP,
562 VMX_VMCS_HOST_RSP,
563 VMX_VMCS_HOST_RIP,
564 VMX_VMCS_HOST_S_CET,
565 VMX_VMCS_HOST_SSP,
566 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
567};
568#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
569
570#ifdef HMVMX_USE_FUNCTION_TABLE
571/**
572 * VMX_EXIT dispatch table.
573 */
574static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
575{
576 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
577 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
578 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
579 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
580 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
581 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
582 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
583 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
584 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
585 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
586 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
587 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
588 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
589 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
590 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
591 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
592 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
593 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
594 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
596 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
597 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
598 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
599 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
600 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
601 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
602 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
603 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
604 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
605#else
606 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
607 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
608 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
609 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
610 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
611 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
612 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
613 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
614 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
615#endif
616 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
617 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
618 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
619 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
620 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
621 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
622 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
623 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
624 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
625 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
626 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
627 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
628 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
629 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
630 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
632 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
633 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
634 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
635 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
636 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
637 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
639 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
640#else
641 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
642#endif
643 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
644 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
646 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
647#else
648 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
651 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
652 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
653 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
654 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
655 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
656 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
657 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
658 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
659 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
660 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
661 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
662 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
663 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
664 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
665 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
666};
667#endif /* HMVMX_USE_FUNCTION_TABLE */
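/*
 * Illustrative sketch, not part of the original source: with the function table
 * enabled, the common exit path would dispatch roughly like this (range checks
 * and statistics omitted):
 *
 *     AssertCompile(RT_ELEMENTS(g_aVMExitHandlers) == VMX_EXIT_MAX + 1);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 */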
668
669#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
670static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
671{
672 /* 0 */ "(Not Used)",
673 /* 1 */ "VMCALL executed in VMX root operation.",
674 /* 2 */ "VMCLEAR with invalid physical address.",
675 /* 3 */ "VMCLEAR with VMXON pointer.",
676 /* 4 */ "VMLAUNCH with non-clear VMCS.",
677 /* 5 */ "VMRESUME with non-launched VMCS.",
678 /* 6 */ "VMRESUME after VMXOFF",
679 /* 7 */ "VM-entry with invalid control fields.",
680 /* 8 */ "VM-entry with invalid host state fields.",
681 /* 9 */ "VMPTRLD with invalid physical address.",
682 /* 10 */ "VMPTRLD with VMXON pointer.",
683 /* 11 */ "VMPTRLD with incorrect revision identifier.",
684 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
685 /* 13 */ "VMWRITE to read-only VMCS component.",
686 /* 14 */ "(Not Used)",
687 /* 15 */ "VMXON executed in VMX root operation.",
688 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
689 /* 17 */ "VM-entry with non-launched executive VMCS.",
690 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
691 /* 19 */ "VMCALL with non-clear VMCS.",
692 /* 20 */ "VMCALL with invalid VM-exit control fields.",
693 /* 21 */ "(Not Used)",
694 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
695 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
696 /* 24 */ "VMCALL with invalid SMM-monitor features.",
697 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
698 /* 26 */ "VM-entry with events blocked by MOV SS.",
699 /* 27 */ "(Not Used)",
700 /* 28 */ "Invalid operand to INVEPT/INVVPID."
701};
702#endif /* VBOX_STRICT && LOG_ENABLED */
703
704
705/**
706 * Gets the CR0 guest/host mask.
707 *
708 * These bits typically do not change through the lifetime of a VM. Any bit set in
709 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
710 * by the guest.
711 *
712 * @returns The CR0 guest/host mask.
713 * @param pVCpu The cross context virtual CPU structure.
714 */
715static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
716{
717 /*
718 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
719 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
720 *
721 * Furthermore, modifications to any bits that are reserved/unspecified currently
722 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
723 * when future CPUs specify and use currently reserved/unspecified bits.
724 */
725 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
726 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
727 * and @bugref{6944}. */
728 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
729 return ( X86_CR0_PE
730 | X86_CR0_NE
731 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
732 | X86_CR0_PG
733 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
734}
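/*
 * Illustrative sketch, not part of the original source: the mask returned here
 * would typically be committed to the VMCS guest/host CR0 mask field, assuming
 * a natural-width write accessor (VMX_VMCS_WRITE_NW) is available:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */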
735
736
737/**
738 * Gets the CR4 guest/host mask.
739 *
740 * These bits typically do not change through the lifetime of a VM. Any bit set in
741 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
742 * by the guest.
743 *
744 * @returns The CR4 guest/host mask.
745 * @param pVCpu The cross context virtual CPU structure.
746 */
747static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
748{
749 /*
750 * We construct a mask of all CR4 bits that the guest can modify without causing
751 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
752 * a VM-exit when the guest attempts to modify them when executing using
753 * hardware-assisted VMX.
754 *
755 * When a feature is not exposed to the guest (and may be present on the host),
756 * we want to intercept guest modifications to the bit so we can emulate proper
757 * behavior (e.g., #GP).
758 *
759 * Furthermore, only modifications to those bits that don't require immediate
760 * emulation are allowed. For example, PCIDE is excluded because its behavior
761 * depends on CR3, which might not always be the guest value while executing
762 * using hardware-assisted VMX.
763 */
764 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
765 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
766#ifdef IN_NEM_DARWIN
767 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
768#endif
769 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
770
771 /*
772 * Paranoia.
773 * Ensure features exposed to the guest are present on the host.
774 */
775 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
776#ifdef IN_NEM_DARWIN
777 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
778#endif
779 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
780
781 uint64_t const fGstMask = X86_CR4_PVI
782 | X86_CR4_TSD
783 | X86_CR4_DE
784 | X86_CR4_MCE
785 | X86_CR4_PCE
786 | X86_CR4_OSXMMEEXCPT
787 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
788#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
789 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
790 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
791#endif
792 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
793 return ~fGstMask;
794}
795
796
797/**
798 * Adds one or more exceptions to the exception bitmap and commits it to the current
799 * VMCS.
800 *
801 * @param pVCpu The cross context virtual CPU structure.
802 * @param pVmxTransient The VMX-transient structure.
803 * @param uXcptMask The exception(s) to add.
804 */
805static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
806{
807 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
808 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
809 if ((uXcptBitmap & uXcptMask) != uXcptMask)
810 {
811 uXcptBitmap |= uXcptMask;
812 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
813 AssertRC(rc);
814 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
815 }
816}
817
818
819/**
820 * Adds an exception to the exception bitmap and commits it to the current VMCS.
821 *
822 * @param pVCpu The cross context virtual CPU structure.
823 * @param pVmxTransient The VMX-transient structure.
824 * @param uXcpt The exception to add.
825 */
826static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
827{
828 Assert(uXcpt <= X86_XCPT_LAST);
829 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
830}
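/*
 * Illustrative sketch, not part of the original source: toggling the intercept
 * of a single exception, say #GP, would look like this (the remove counterpart
 * is defined further below):
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     ...
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 */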
831
832
833/**
834 * Remove one or more exceptions from the exception bitmap and commits it to the
835 * current VMCS.
836 *
837 * This takes care of not removing the exception intercept if a nested-guest
838 * requires the exception to be intercepted.
839 *
840 * @returns VBox status code.
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to remove.
844 */
845static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if (u32XcptBitmap & uXcptMask)
850 {
851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
852 if (!pVmxTransient->fIsNestedGuest)
853 { /* likely */ }
854 else
855 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
856#endif
857#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
858 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
859 | RT_BIT(X86_XCPT_DE)
860 | RT_BIT(X86_XCPT_NM)
861 | RT_BIT(X86_XCPT_TS)
862 | RT_BIT(X86_XCPT_UD)
863 | RT_BIT(X86_XCPT_NP)
864 | RT_BIT(X86_XCPT_SS)
865 | RT_BIT(X86_XCPT_GP)
866 | RT_BIT(X86_XCPT_PF)
867 | RT_BIT(X86_XCPT_MF));
868#elif defined(HMVMX_ALWAYS_TRAP_PF)
869 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
870#endif
871 if (uXcptMask)
872 {
873 /* Validate we are not removing any essential exception intercepts. */
874#ifndef IN_NEM_DARWIN
875 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
876#else
877 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
878#endif
879 NOREF(pVCpu);
880 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
881 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
882
883 /* Remove it from the exception bitmap. */
884 u32XcptBitmap &= ~uXcptMask;
885
886 /* Commit and update the cache if necessary. */
887 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
888 {
889 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
890 AssertRC(rc);
891 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
892 }
893 }
894 }
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Remove an exceptions from the exception bitmap and commits it to the current
901 * VMCS.
902 *
903 * @returns VBox status code.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param pVmxTransient The VMX-transient structure.
906 * @param uXcpt The exception to remove.
907 */
908static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
909{
910 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
911}
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
914
915/**
916 * Loads the shadow VMCS specified by the VMCS info. object.
917 *
918 * @returns VBox status code.
919 * @param pVmcsInfo The VMCS info. object.
920 *
921 * @remarks Can be called with interrupts disabled.
922 */
923static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
924{
925 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
926 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
927
928 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
929 if (RT_SUCCESS(rc))
930 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
931 return rc;
932}
933
934
935/**
936 * Clears the shadow VMCS specified by the VMCS info. object.
937 *
938 * @returns VBox status code.
939 * @param pVmcsInfo The VMCS info. object.
940 *
941 * @remarks Can be called with interrupts disabled.
942 */
943static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
944{
945 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
946 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
947
948 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
949 if (RT_SUCCESS(rc))
950 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
951 return rc;
952}
953
954
955/**
956 * Switches from and to the specified VMCSes.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
960 * @param pVmcsInfoTo The VMCS info. object we are switching to.
961 *
962 * @remarks Called with interrupts disabled.
963 */
964static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
965{
966 /*
967 * Clear the VMCS we are switching out if it has not already been cleared.
968 * This will sync any CPU internal data back to the VMCS.
969 */
970 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
971 {
972 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
973 if (RT_SUCCESS(rc))
974 {
975 /*
976 * The shadow VMCS, if any, would not be active at this point since we
977 * would have cleared it while importing the virtual hardware-virtualization
978 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
979 * clear the shadow VMCS here, just assert for safety.
980 */
981 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
982 }
983 else
984 return rc;
985 }
986
987 /*
988 * Clear the VMCS we are switching to if it has not already been cleared.
989 * This will initialize the VMCS launch state to "clear" required for loading it.
990 *
991 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
992 */
993 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
994 {
995 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
996 if (RT_SUCCESS(rc))
997 { /* likely */ }
998 else
999 return rc;
1000 }
1001
1002 /*
1003 * Finally, load the VMCS we are switching to.
1004 */
1005 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1006}
1007
1008
1009/**
1010 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1011 * caller.
1012 *
1013 * @returns VBox status code.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1016 * true) or guest VMCS (pass false).
1017 */
1018static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1019{
1020 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1021 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1022
1023 PVMXVMCSINFO pVmcsInfoFrom;
1024 PVMXVMCSINFO pVmcsInfoTo;
1025 if (fSwitchToNstGstVmcs)
1026 {
1027 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1028 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1029 }
1030 else
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1034 }
1035
1036 /*
1037 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1038 * preemption hook code path acquires the current VMCS.
1039 */
1040 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1041
1042 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1043 if (RT_SUCCESS(rc))
1044 {
1045 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1046 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1047
1048 /*
1049 * If we are switching to a VMCS that was executed on a different host CPU or was
1050 * never executed before, flag that we need to export the host state before executing
1051 * guest/nested-guest code using hardware-assisted VMX.
1052 *
1053 * This could probably be done in a preemptible context since the preemption hook
1054 * will flag the necessary change in host context. However, since preemption is
1055 * already disabled and to avoid making assumptions about host specific code in
1056 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1057 * disabled.
1058 */
1059 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1060 { /* likely */ }
1061 else
1062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1063
1064 ASMSetFlags(fEFlags);
1065
1066 /*
1067 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1068 * flag that we need to update the host MSR values there. Even if we decide in the
1069 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1070 * if its content differs, we would have to update the host MSRs anyway.
1071 */
1072 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1073 }
1074 else
1075 ASMSetFlags(fEFlags);
1076 return rc;
1077}
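/*
 * Illustrative sketch, not part of the original source: when emulating a
 * nested-guest VMLAUNCH/VMRESUME, the nested-guest VMCS would be made current
 * roughly like this:
 *
 *     int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true);  // true = switch to the nested-guest VMCS
 *     AssertRCReturn(rc, rc);
 */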
1078
1079#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1080#ifdef VBOX_STRICT
1081
1082/**
1083 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1084 * transient structure.
1085 *
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param pVmxTransient The VMX-transient structure.
1088 */
1089DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1090{
1091 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1092 AssertRC(rc);
1093}
1094
1095
1096/**
1097 * Reads the VM-entry exception error code field from the VMCS into
1098 * the VMX transient structure.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure.
1101 * @param pVmxTransient The VMX-transient structure.
1102 */
1103DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1104{
1105 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1106 AssertRC(rc);
1107}
1108
1109
1110/**
1111 * Reads the VM-entry instruction length field from the VMCS into the VMX
1112 * transient structure.
1113 *
1114 * @param pVCpu The cross context virtual CPU structure.
1115 * @param pVmxTransient The VMX-transient structure.
1116 */
1117DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1118{
1119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1120 AssertRC(rc);
1121}
1122
1123#endif /* VBOX_STRICT */
1124
1125
1126/**
1127 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1128 *
1129 * Don't call this directly unless it's likely that some or all of the fields
1130 * given in @a a_fReadMask have already been read.
1131 *
1132 * @tparam a_fReadMask The fields to read.
1133 * @param pVCpu The cross context virtual CPU structure.
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136template<uint32_t const a_fReadMask>
1137static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1138{
1139 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1140 | HMVMX_READ_EXIT_INSTR_LEN
1141 | HMVMX_READ_EXIT_INSTR_INFO
1142 | HMVMX_READ_IDT_VECTORING_INFO
1143 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1144 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1145 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1146 | HMVMX_READ_GUEST_LINEAR_ADDR
1147 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1148 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1149 )) == 0);
1150
1151 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1152 {
1153 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1154
1155 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1156 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1157 {
1158 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1159 AssertRC(rc);
1160 }
1161 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1162 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1163 {
1164 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1165 AssertRC(rc);
1166 }
1167 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1168 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1169 {
1170 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1171 AssertRC(rc);
1172 }
1173 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1174 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1175 {
1176 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1177 AssertRC(rc);
1178 }
1179 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1180 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1181 {
1182 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1183 AssertRC(rc);
1184 }
1185 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1186 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1187 {
1188 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1189 AssertRC(rc);
1190 }
1191 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1192 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 }
1197 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1198 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1199 {
1200 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1201 AssertRC(rc);
1202 }
1203 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1204 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1205 {
1206 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1207 AssertRC(rc);
1208 }
1209 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1210 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1211 {
1212 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1213 AssertRC(rc);
1214 }
1215
1216 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1217 }
1218}
1219
1220
1221/**
1222 * Reads VMCS fields into the VMXTRANSIENT structure.
1223 *
1224 * This optimizes for the case where none of the fields in @a a_fReadMask have
1225 * been read yet, generating an optimized read sequence without any conditionals
1226 * in non-strict builds.
1227 *
1228 * @tparam a_fReadMask The fields to read. One or more of the
1229 * HMVMX_READ_XXX fields ORed together.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 * @param pVmxTransient The VMX-transient structure.
1232 */
1233template<uint32_t const a_fReadMask>
1234DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1235{
1236 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1237 | HMVMX_READ_EXIT_INSTR_LEN
1238 | HMVMX_READ_EXIT_INSTR_INFO
1239 | HMVMX_READ_IDT_VECTORING_INFO
1240 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1241 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1242 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1243 | HMVMX_READ_GUEST_LINEAR_ADDR
1244 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1245 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1246 )) == 0);
1247
1248 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1249 {
1250 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1251 {
1252 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1253 AssertRC(rc);
1254 }
1255 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1256 {
1257 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1286 {
1287 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1296 {
1297 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1298 AssertRC(rc);
1299 }
1300
1301 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1302 }
1303 else
1304 {
1305 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1306 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1307 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1308 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1309 }
1310}
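/*
 * Illustrative sketch, not part of the original source: an exit handler would
 * typically pre-read the fields it needs in one condensed call, after which
 * HMVMX_ASSERT_READ() can verify them:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 */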
1311
1312
1313#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1314/**
1315 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1316 *
1317 * @param pVCpu The cross context virtual CPU structure.
1318 * @param pVmxTransient The VMX-transient structure.
1319 */
1320static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1321{
1322 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1323 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1324 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1325 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1326 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1327 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1329 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1330 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1331 AssertRC(rc);
1332 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1333 | HMVMX_READ_EXIT_INSTR_LEN
1334 | HMVMX_READ_EXIT_INSTR_INFO
1335 | HMVMX_READ_IDT_VECTORING_INFO
1336 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1337 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1338 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1339 | HMVMX_READ_GUEST_LINEAR_ADDR
1340 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1341}
1342#endif
1343
1344/**
1345 * Verifies that our cached values of the VMCS fields are all consistent with
1346 * what's actually present in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1350 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1351 * VMCS content. HMCPU error-field is
1352 * updated, see VMX_VCI_XXX.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param pVmcsInfo The VMCS info. object.
1355 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1356 */
1357static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1358{
1359 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1360
1361 uint32_t u32Val;
1362 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1363 AssertRC(rc);
1364 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1365 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1366 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1367 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1368
1369 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1370 AssertRC(rc);
1371 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1372 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1373 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1374 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1375
1376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1377 AssertRC(rc);
1378 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1379 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1380 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1381 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1382
1383 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1384 AssertRC(rc);
1385 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1386 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1387 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1388 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1389
1390 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1391 {
1392 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1395 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398 }
1399
1400 uint64_t u64Val;
1401 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1402 {
1403 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1406 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409 }
1410
1411 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1414 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417
1418 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1419 AssertRC(rc);
1420 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1421 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1422 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1423 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1424
1425 NOREF(pcszVmcs);
1426 return VINF_SUCCESS;
1427}
1428
1429
1430/**
1431 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1432 * VMCS.
1433 *
1434 * This is typically required when the guest changes paging mode.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 * @param pVmxTransient The VMX-transient structure.
1439 *
1440 * @remarks Requires EFER.
1441 * @remarks No-long-jump zone!!!
1442 */
1443static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1444{
1445 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1446 {
1447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1449
1450 /*
1451 * VM-entry controls.
1452 */
1453 {
1454 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1455 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
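            /* Note: fVal starts out with all the bits this CPU requires to be 1, and fZap holds
               the bits it permits to be 1. The (fVal & fZap) == fVal check further down therefore
               catches any control we ask for that the CPU does not support -- e.g. a hypothetical
               CPU lacking the "load IA32_EFER" entry control would fail that check if we tried to
               set it. */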
1456
1457 /*
1458 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1459 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1460 *
1461 * For nested-guests, this is a mandatory VM-entry control. It's also
1462 * required because we do not want to leak host bits to the nested-guest.
1463 */
1464 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1465
1466 /*
1467 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1468 *
1469 * For nested-guests, the "IA-32e mode guest" control we initialize with what is
1470             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1471             * required to get the nested-guest working with hardware-assisted VMX execution.
1472 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1473 * here rather than while merging the guest VMCS controls.
1474 */
1475 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1476 {
1477 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1478 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1479 }
1480 else
1481 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1482
1483 /*
1484 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1485 *
1486 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1487 * regardless of whether the nested-guest VMCS specifies it because we are free to
1488 * load whatever MSRs we require and we do not need to modify the guest visible copy
1489 * of the VM-entry MSR load area.
1490 */
1491 if ( g_fHmVmxSupportsVmcsEfer
1492#ifndef IN_NEM_DARWIN
1493 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1494#endif
1495 )
1496 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1497 else
1498 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1499
1500 /*
1501 * The following should -not- be set (since we're not in SMM mode):
1502 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1503 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1504 */
1505
1506 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1507 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1508
1509 if ((fVal & fZap) == fVal)
1510 { /* likely */ }
1511 else
1512 {
1513 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1514 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1515 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1516 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1517 }
1518
1519 /* Commit it to the VMCS. */
1520 if (pVmcsInfo->u32EntryCtls != fVal)
1521 {
1522 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1523 AssertRC(rc);
1524 pVmcsInfo->u32EntryCtls = fVal;
1525 }
1526 }
1527
1528 /*
1529 * VM-exit controls.
1530 */
1531 {
1532 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1533 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1534
1535 /*
1536 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1537 * supported the 1-setting of this bit.
1538 *
1539             * For nested-guests, we set "save debug controls" since the converse
1540             * "load debug controls" is mandatory for nested-guests anyway.
1541 */
1542 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1543
1544 /*
1545 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1546 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1547 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1548 * vmxHCExportHostMsrs().
1549 *
1550 * For nested-guests, we always set this bit as we do not support 32-bit
1551 * hosts.
1552 */
1553 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1554
1555#ifndef IN_NEM_DARWIN
1556 /*
1557 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1558 *
1559 * For nested-guests, we should use the "save IA32_EFER" control if we also
1560 * used the "load IA32_EFER" control while exporting VM-entry controls.
1561 */
1562 if ( g_fHmVmxSupportsVmcsEfer
1563 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1564 {
1565 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1566 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1567 }
1568#endif
1569
1570 /*
1571 * Enable saving of the VMX-preemption timer value on VM-exit.
1572 * For nested-guests, currently not exposed/used.
1573 */
1574 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1575 * the timer value. */
1576 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1577 {
1578 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1579 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1580 }
1581
1582 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1583 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1584
1585 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1586 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1587 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1588
1589 if ((fVal & fZap) == fVal)
1590 { /* likely */ }
1591 else
1592 {
1593 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1594 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1595 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1596 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1597 }
1598
1599 /* Commit it to the VMCS. */
1600 if (pVmcsInfo->u32ExitCtls != fVal)
1601 {
1602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1603 AssertRC(rc);
1604 pVmcsInfo->u32ExitCtls = fVal;
1605 }
1606 }
1607
1608 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1609 }
1610 return VINF_SUCCESS;
1611}
1612
1613
1614/**
1615 * Sets the TPR threshold in the VMCS.
1616 *
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param pVmcsInfo The VMCS info. object.
1619 * @param u32TprThreshold The TPR threshold (task-priority class only).
1620 */
1621DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1622{
1623 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1624 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1625 RT_NOREF(pVmcsInfo);
1626 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1627 AssertRC(rc);
1628}
1629
1630
1631/**
1632 * Exports the guest APIC TPR state into the VMCS.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure.
1635 * @param pVmxTransient The VMX-transient structure.
1636 *
1637 * @remarks No-long-jump zone!!!
1638 */
1639static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1640{
1641 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1642 {
1643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1644
1645 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1646 if (!pVmxTransient->fIsNestedGuest)
1647 {
1648 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1649 && APICIsEnabled(pVCpu))
1650 {
1651 /*
1652 * Setup TPR shadowing.
1653 */
1654 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1655 {
1656 bool fPendingIntr = false;
1657 uint8_t u8Tpr = 0;
1658 uint8_t u8PendingIntr = 0;
1659 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1660 AssertRC(rc);
1661
1662 /*
1663 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1664 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1665 * priority of the pending interrupt so we can deliver the interrupt. If there
1666 * are no interrupts pending, set threshold to 0 to not cause any
1667 * TPR-below-threshold VM-exits.
1668 */
1669 uint32_t u32TprThreshold = 0;
1670 if (fPendingIntr)
1671 {
1672 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1673 (which is the Task-Priority Class). */
1674 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1675 const uint8_t u8TprPriority = u8Tpr >> 4;
1676 if (u8PendingPriority <= u8TprPriority)
1677 u32TprThreshold = u8PendingPriority;
1678 }
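                    /*
                     * Worked example: a pending vector of 0x52 is priority class 5; with a guest
                     * TPR of 0x61 (class 6) it is masked, so we program a threshold of 5 and get
                     * a TPR-below-threshold VM-exit as soon as the guest lowers its TPR class
                     * below 5, at which point the pending interrupt becomes deliverable.
                     */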
1679
1680 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1681 }
1682 }
1683 }
1684 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1685 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1686 }
1687}
1688
1689
1690/**
1691 * Gets the guest interruptibility-state and updates related force-flags.
1692 *
1693 * @returns Guest's interruptibility-state.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 *
1696 * @remarks No-long-jump zone!!!
1697 */
1698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1699{
1700 uint32_t fIntrState;
1701
1702 /*
1703 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1704 */
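    /* For example, STI only takes effect after the instruction that follows it, so an
       "STI; HLT" pair cannot be split by an interrupt; likewise MOV SS / POP SS inhibit
       interrupts (and most debug exceptions) for one instruction so that an SS:ESP update
       pair is not torn apart. */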
1705 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1706 fIntrState = 0;
1707 else
1708 {
1709 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1710 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1711
1712 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1714 else
1715 {
1716 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1717
1718 /* Block-by-STI must not be set when interrupts are disabled. */
1719 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1720 }
1721 }
1722
1723 /*
1724 * Check if we should inhibit NMI delivery.
1725 */
1726 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1727 { /* likely */ }
1728 else
1729 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1730
1731 /*
1732 * Validate.
1733 */
1734    /* We don't support block-by-SMI yet. */
1735 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1736
1737 return fIntrState;
1738}
1739
1740
1741/**
1742 * Exports the exception intercepts required for guest execution in the VMCS.
1743 *
1744 * @param pVCpu The cross context virtual CPU structure.
1745 * @param pVmxTransient The VMX-transient structure.
1746 *
1747 * @remarks No-long-jump zone!!!
1748 */
1749static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1750{
1751 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1752 {
1753 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1754 if ( !pVmxTransient->fIsNestedGuest
1755 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1756 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1757 else
1758 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1759
1760 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1761 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1762 }
1763}
1764
1765
1766/**
1767 * Exports the guest's RIP into the guest-state area in the VMCS.
1768 *
1769 * @param pVCpu The cross context virtual CPU structure.
1770 *
1771 * @remarks No-long-jump zone!!!
1772 */
1773static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1774{
1775 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1776 {
1777 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1778
1779 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1780 AssertRC(rc);
1781
1782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1783 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1784 }
1785}
1786
1787
1788/**
1789 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1790 *
1791 * @param pVCpu The cross context virtual CPU structure.
1792 * @param pVmxTransient The VMX-transient structure.
1793 *
1794 * @remarks No-long-jump zone!!!
1795 */
1796static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1797{
1798 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1799 {
1800 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1801
1802 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1803 Let us assert it as such and use 32-bit VMWRITE. */
1804 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1805 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1806 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1807 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
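        /* X86_EFL_RA1_MASK / X86_EFL_1 is the reserved always-one flag (bit 1), so any valid
           RFLAGS value is at least 0x2; the assertions above verify that and that no reserved
           bits outside the live flags are set. */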
1808
1809#ifndef IN_NEM_DARWIN
1810 /*
1811 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1812 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1813 * can run the real-mode guest code under Virtual 8086 mode.
1814 */
1815 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1816 if (pVmcsInfo->RealMode.fRealOnV86Active)
1817 {
1818 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1819 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1820 Assert(!pVmxTransient->fIsNestedGuest);
1821 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1822 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1823 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1824 }
1825#else
1826 RT_NOREF(pVmxTransient);
1827#endif
1828
1829 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1830 AssertRC(rc);
1831
1832 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1833 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1834 }
1835}
1836
1837
1838#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1839/**
1840 * Copies the nested-guest VMCS to the shadow VMCS.
1841 *
1842 * @returns VBox status code.
1843 * @param pVCpu The cross context virtual CPU structure.
1844 * @param pVmcsInfo The VMCS info. object.
1845 *
1846 * @remarks No-long-jump zone!!!
1847 */
1848static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1849{
1850 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1851 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1852
1853 /*
1854 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1855 * current VMCS, as we may try saving guest lazy MSRs.
1856 *
1857 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1858     * calling the VMCS import code, which currently performs the guest MSR reads
1859     * (on 64-bit hosts), accesses the auto-load/store MSR area (on 32-bit hosts)
1860     * and does the rest of the VMX leave-session machinery.
1861 */
1862 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1863
1864 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1865 if (RT_SUCCESS(rc))
1866 {
1867 /*
1868 * Copy all guest read/write VMCS fields.
1869 *
1870 * We don't check for VMWRITE failures here for performance reasons and
1871 * because they are not expected to fail, barring irrecoverable conditions
1872 * like hardware errors.
1873 */
1874 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1875 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1876 {
1877 uint64_t u64Val;
1878 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1879 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1880 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1881 }
1882
1883 /*
1884 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1885 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1886 */
1887 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1888 {
1889 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1890 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1891 {
1892 uint64_t u64Val;
1893 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1894 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1895 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1896 }
1897 }
1898
1899 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1900 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1901 }
1902
1903 ASMSetFlags(fEFlags);
1904 return rc;
1905}
1906
1907
1908/**
1909 * Copies the shadow VMCS to the nested-guest VMCS.
1910 *
1911 * @returns VBox status code.
1912 * @param pVCpu The cross context virtual CPU structure.
1913 * @param pVmcsInfo The VMCS info. object.
1914 *
1915 * @remarks Called with interrupts disabled.
1916 */
1917static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1918{
1919 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1920 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1921 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1922
1923 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1924 if (RT_SUCCESS(rc))
1925 {
1926 /*
1927 * Copy guest read/write fields from the shadow VMCS.
1928 * Guest read-only fields cannot be modified, so no need to copy them.
1929 *
1930 * We don't check for VMREAD failures here for performance reasons and
1931 * because they are not expected to fail, barring irrecoverable conditions
1932 * like hardware errors.
1933 */
1934 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1935 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1936 {
1937 uint64_t u64Val;
1938 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1939 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1940 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1941 }
1942
1943 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1944 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1945 }
1946 return rc;
1947}
1948
1949
1950/**
1951 * Enables VMCS shadowing for the given VMCS info. object.
1952 *
1953 * @param pVCpu The cross context virtual CPU structure.
1954 * @param pVmcsInfo The VMCS info. object.
1955 *
1956 * @remarks No-long-jump zone!!!
1957 */
1958static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1959{
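    /* With the VMCS-shadowing control set and the VMCS link pointer referencing the shadow
       VMCS, guest VMREAD/VMWRITE instructions whose fields are not intercepted via the
       VMREAD/VMWRITE bitmaps are serviced from the shadow VMCS without causing VM-exits. */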
1960 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1961 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1962 {
1963 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1964 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1965 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1966 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1967 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1968 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1969 Log4Func(("Enabled\n"));
1970 }
1971}
1972
1973
1974/**
1975 * Disables VMCS shadowing for the given VMCS info. object.
1976 *
1977 * @param pVCpu The cross context virtual CPU structure.
1978 * @param pVmcsInfo The VMCS info. object.
1979 *
1980 * @remarks No-long-jump zone!!!
1981 */
1982static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1983{
1984 /*
1985 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1986 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1987 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1988 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1989 *
1990 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1991 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
1992 */
1993 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1994 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
1995 {
1996 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
1997 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1998 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
1999 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2000 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2001 Log4Func(("Disabled\n"));
2002 }
2003}
2004#endif
2005
2006
2007/**
2008 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2009 *
2010 * The guest FPU state is always pre-loaded hence we don't need to bother about
2011 * sharing FPU related CR0 bits between the guest and host.
2012 *
2013 * @returns VBox status code.
2014 * @param pVCpu The cross context virtual CPU structure.
2015 * @param pVmxTransient The VMX-transient structure.
2016 *
2017 * @remarks No-long-jump zone!!!
2018 */
2019static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2020{
2021 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2022 {
2023 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2024 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2025
2026 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2027 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2028 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2029 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2030 else
2031 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
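        /* Fixed-bit semantics: while in VMX operation any bit set in the CR0 fixed-0 MSR must
           be 1 in CR0 and any bit clear in the fixed-1 MSR must be 0. On typical hardware this
           forces PE, PG and NE to 1, which is why unrestricted guest execution (which lifts
           the PE/PG requirement) is special-cased above. */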
2032
2033 if (!pVmxTransient->fIsNestedGuest)
2034 {
2035 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2036 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2037 uint64_t const u64ShadowCr0 = u64GuestCr0;
2038 Assert(!RT_HI_U32(u64GuestCr0));
2039
2040 /*
2041 * Setup VT-x's view of the guest CR0.
2042 */
2043 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2044 if (VM_IS_VMX_NESTED_PAGING(pVM))
2045 {
2046#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2047 if (CPUMIsGuestPagingEnabled(pVCpu))
2048 {
2049 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2050 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2051 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2052 }
2053 else
2054 {
2055 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2056 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2057 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2058 }
2059
2060 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2061 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2062 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2063#endif
2064 }
2065 else
2066 {
2067 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2068 u64GuestCr0 |= X86_CR0_WP;
2069 }
2070
2071 /*
2072 * Guest FPU bits.
2073 *
2074             * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2075             * using CR0.TS.
2076             *
2077             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2078             * set on the first CPUs to support VT-x; no mention is made with regards to UX in the VM-entry checks.
2079 */
2080 u64GuestCr0 |= X86_CR0_NE;
2081
2082 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2083 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2084
2085 /*
2086 * Update exception intercepts.
2087 */
2088 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2089#ifndef IN_NEM_DARWIN
2090 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2091 {
2092 Assert(PDMVmmDevHeapIsEnabled(pVM));
2093 Assert(pVM->hm.s.vmx.pRealModeTSS);
2094 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2095 }
2096 else
2097#endif
2098 {
2099 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2100 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2101 if (fInterceptMF)
2102 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2103 }
2104
2105 /* Additional intercepts for debugging, define these yourself explicitly. */
2106#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2107 uXcptBitmap |= 0
2108 | RT_BIT(X86_XCPT_BP)
2109 | RT_BIT(X86_XCPT_DE)
2110 | RT_BIT(X86_XCPT_NM)
2111 | RT_BIT(X86_XCPT_TS)
2112 | RT_BIT(X86_XCPT_UD)
2113 | RT_BIT(X86_XCPT_NP)
2114 | RT_BIT(X86_XCPT_SS)
2115 | RT_BIT(X86_XCPT_GP)
2116 | RT_BIT(X86_XCPT_PF)
2117 | RT_BIT(X86_XCPT_MF)
2118 ;
2119#elif defined(HMVMX_ALWAYS_TRAP_PF)
2120 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2121#endif
2122 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2123 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2124 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2125 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2126 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
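            /* Each bit n of the exception bitmap makes exception vector n cause a VM-exit
               (for #PF, vector 14, additionally filtered by the page-fault error-code
               mask/match fields). Without nested paging we must intercept #PF to maintain
               the shadow page tables, hence the assertion above. */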
2127
2128 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2129 u64GuestCr0 |= fSetCr0;
2130 u64GuestCr0 &= fZapCr0;
2131 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2132
2133 /* Commit the CR0 and related fields to the guest VMCS. */
2134 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2135 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2136 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2137 {
2138 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2139 AssertRC(rc);
2140 }
2141 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2142 {
2143 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2144 AssertRC(rc);
2145 }
2146
2147 /* Update our caches. */
2148 pVmcsInfo->u32ProcCtls = uProcCtls;
2149 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2150
2151 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2152 }
2153 else
2154 {
2155 /*
2156 * With nested-guests, we may have extended the guest/host mask here since we
2157 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2158 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2159 * originally supplied. We must copy those bits from the nested-guest CR0 into
2160 * the nested-guest CR0 read-shadow.
2161 */
2162 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2163 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2164 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2165 Assert(!RT_HI_U32(u64GuestCr0));
2166 Assert(u64GuestCr0 & X86_CR0_NE);
2167
2168 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2169 u64GuestCr0 |= fSetCr0;
2170 u64GuestCr0 &= fZapCr0;
2171 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2172
2173 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2174 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2175 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2176
2177 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2178 }
2179
2180 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2181 }
2182
2183 return VINF_SUCCESS;
2184}
2185
2186
2187/**
2188 * Exports the guest control registers (CR3, CR4) into the guest-state area
2189 * in the VMCS.
2190 *
2191 * @returns VBox strict status code.
2192 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2193 * without unrestricted guest access and the VMMDev is not presently
2194 * mapped (e.g. EFI32).
2195 *
2196 * @param pVCpu The cross context virtual CPU structure.
2197 * @param pVmxTransient The VMX-transient structure.
2198 *
2199 * @remarks No-long-jump zone!!!
2200 */
2201static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2202{
2203 int rc = VINF_SUCCESS;
2204 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2205
2206 /*
2207 * Guest CR2.
2208 * It's always loaded in the assembler code. Nothing to do here.
2209 */
2210
2211 /*
2212 * Guest CR3.
2213 */
2214 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2215 {
2216 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2217
2218 if (VM_IS_VMX_NESTED_PAGING(pVM))
2219 {
2220#ifndef IN_NEM_DARWIN
2221 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2222 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2223
2224 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2225 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2226 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2227 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2228
2229 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2230 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2231 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
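            /* Resulting EPTP layout: bits 2:0 hold the memory type (6 = write-back), bits 5:3
               the page-walk length minus one (3 for a 4-level walk), bit 6 would enable EPT
               accessed/dirty flags, and the upper bits hold the physical address of the EPT
               PML4 table; a typical value thus looks like (PML4 address | 0x1e). */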
2232
2233 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2234 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2235 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2236 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2237 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2238 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2239 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2240
2241 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2242 AssertRC(rc);
2243#endif
2244
2245 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2246 uint64_t u64GuestCr3 = pCtx->cr3;
2247 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2248 || CPUMIsGuestPagingEnabledEx(pCtx))
2249 {
2250 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2251 if (CPUMIsGuestInPAEModeEx(pCtx))
2252 {
2253 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2254 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2256 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
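                    /* With EPT, a PAE guest's four PDPTEs are taken from these VMCS fields on
                       VM-entry instead of being re-read from guest CR3 by the CPU. */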
2257 }
2258
2259 /*
2260                 * With nested paging, the guest's view of its CR3 is left unblemished: either the
2261                 * guest is using paging, or we have unrestricted guest execution to handle the
2262                 * case where it is not using paging.
2263 */
2264 }
2265#ifndef IN_NEM_DARWIN
2266 else
2267 {
2268 /*
2269 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2270 * thinks it accesses physical memory directly, we use our identity-mapped
2271 * page table to map guest-linear to guest-physical addresses. EPT takes care
2272 * of translating it to host-physical addresses.
2273 */
2274 RTGCPHYS GCPhys;
2275 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2276
2277 /* We obtain it here every time as the guest could have relocated this PCI region. */
2278 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2279 if (RT_SUCCESS(rc))
2280 { /* likely */ }
2281 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2282 {
2283 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2284 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2285 }
2286 else
2287 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2288
2289 u64GuestCr3 = GCPhys;
2290 }
2291#endif
2292
2293 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2294 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2295 AssertRC(rc);
2296 }
2297 else
2298 {
2299 Assert(!pVmxTransient->fIsNestedGuest);
2300 /* Non-nested paging case, just use the hypervisor's CR3. */
2301 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2302
2303 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2304 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2305 AssertRC(rc);
2306 }
2307
2308 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2309 }
2310
2311 /*
2312 * Guest CR4.
2313     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2314 */
2315 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2316 {
2317 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2318 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2319
2320 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2321 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2322
2323 /*
2324 * With nested-guests, we may have extended the guest/host mask here (since we
2325 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2326 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2327 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2328 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2329 */
2330 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2331 uint64_t u64GuestCr4 = pCtx->cr4;
2332 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2333 ? pCtx->cr4
2334 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2335 Assert(!RT_HI_U32(u64GuestCr4));
2336
2337#ifndef IN_NEM_DARWIN
2338 /*
2339 * Setup VT-x's view of the guest CR4.
2340 *
2341 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2342 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2343 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2344 *
2345 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2346 */
2347 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2348 {
2349 Assert(pVM->hm.s.vmx.pRealModeTSS);
2350 Assert(PDMVmmDevHeapIsEnabled(pVM));
2351 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2352 }
2353#endif
2354
2355 if (VM_IS_VMX_NESTED_PAGING(pVM))
2356 {
2357 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2358 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2359 {
2360 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2361 u64GuestCr4 |= X86_CR4_PSE;
2362 /* Our identity mapping is a 32-bit page directory. */
2363 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2364 }
2365            /* else use guest CR4. */
2366 }
2367 else
2368 {
2369 Assert(!pVmxTransient->fIsNestedGuest);
2370
2371 /*
2372                 * The shadow paging mode and the guest paging mode can differ: the shadow follows the host
2373                 * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2374 */
2375 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2376 {
2377 case PGMMODE_REAL: /* Real-mode. */
2378 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2379 case PGMMODE_32_BIT: /* 32-bit paging. */
2380 {
2381 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2382 break;
2383 }
2384
2385 case PGMMODE_PAE: /* PAE paging. */
2386 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2387 {
2388 u64GuestCr4 |= X86_CR4_PAE;
2389 break;
2390 }
2391
2392 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2393 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2394 {
2395#ifdef VBOX_WITH_64_BITS_GUESTS
2396 /* For our assumption in vmxHCShouldSwapEferMsr. */
2397 Assert(u64GuestCr4 & X86_CR4_PAE);
2398 break;
2399#endif
2400 }
2401 default:
2402 AssertFailed();
2403 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2404 }
2405 }
2406
2407 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2408 u64GuestCr4 |= fSetCr4;
2409 u64GuestCr4 &= fZapCr4;
2410
2411 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2412 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2413 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2414
2415#ifndef IN_NEM_DARWIN
2416 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2417 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2418 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2419 {
2420 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2421 hmR0VmxUpdateStartVmFunction(pVCpu);
2422 }
2423#endif
2424
2425 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2426
2427 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2428 }
2429 return rc;
2430}
2431
2432
2433#ifdef VBOX_STRICT
2434/**
2435 * Strict function to validate segment registers.
2436 *
2437 * @param pVCpu The cross context virtual CPU structure.
2438 * @param pVmcsInfo The VMCS info. object.
2439 *
2440 * @remarks Will import guest CR0 on strict builds during validation of
2441 * segments.
2442 */
2443static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2444{
2445 /*
2446 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2447 *
2448 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2449 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2450 * unusable bit and doesn't change the guest-context value.
2451 */
2452 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2453 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2454 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2455 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2456 && ( !CPUMIsGuestInRealModeEx(pCtx)
2457 && !CPUMIsGuestInV86ModeEx(pCtx)))
2458 {
2459 /* Protected mode checks */
2460 /* CS */
2461 Assert(pCtx->cs.Attr.n.u1Present);
2462 Assert(!(pCtx->cs.Attr.u & 0xf00));
2463 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2464 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2465 || !(pCtx->cs.Attr.n.u1Granularity));
2466 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2467 || (pCtx->cs.Attr.n.u1Granularity));
2468 /* CS cannot be loaded with NULL in protected mode. */
2469 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2470 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2471 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2472 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2473 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2474 else
2475            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2476 /* SS */
2477 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2478 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2479 if ( !(pCtx->cr0 & X86_CR0_PE)
2480 || pCtx->cs.Attr.n.u4Type == 3)
2481 {
2482 Assert(!pCtx->ss.Attr.n.u2Dpl);
2483 }
2484 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2485 {
2486 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2487 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2488 Assert(pCtx->ss.Attr.n.u1Present);
2489 Assert(!(pCtx->ss.Attr.u & 0xf00));
2490 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2491 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2492 || !(pCtx->ss.Attr.n.u1Granularity));
2493 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2494 || (pCtx->ss.Attr.n.u1Granularity));
2495 }
2496 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2497 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2498 {
2499 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2500 Assert(pCtx->ds.Attr.n.u1Present);
2501 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2502 Assert(!(pCtx->ds.Attr.u & 0xf00));
2503 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2504 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2505 || !(pCtx->ds.Attr.n.u1Granularity));
2506 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2507 || (pCtx->ds.Attr.n.u1Granularity));
2508 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2509 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2510 }
2511 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2512 {
2513 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2514 Assert(pCtx->es.Attr.n.u1Present);
2515 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2516 Assert(!(pCtx->es.Attr.u & 0xf00));
2517 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2518 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2519 || !(pCtx->es.Attr.n.u1Granularity));
2520 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2521 || (pCtx->es.Attr.n.u1Granularity));
2522 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2523 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2524 }
2525 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2526 {
2527 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2528 Assert(pCtx->fs.Attr.n.u1Present);
2529 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2530 Assert(!(pCtx->fs.Attr.u & 0xf00));
2531 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2532 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2533 || !(pCtx->fs.Attr.n.u1Granularity));
2534 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2535 || (pCtx->fs.Attr.n.u1Granularity));
2536 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2537 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2538 }
2539 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2540 {
2541 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2542 Assert(pCtx->gs.Attr.n.u1Present);
2543 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2544 Assert(!(pCtx->gs.Attr.u & 0xf00));
2545 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2546 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2547 || !(pCtx->gs.Attr.n.u1Granularity));
2548 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2549 || (pCtx->gs.Attr.n.u1Granularity));
2550 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2551 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2552 }
2553 /* 64-bit capable CPUs. */
2554 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2555 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2556 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2557 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2558 }
2559 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2560 || ( CPUMIsGuestInRealModeEx(pCtx)
2561 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2562 {
2563 /* Real and v86 mode checks. */
2564        /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2565 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2566#ifndef IN_NEM_DARWIN
2567 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2568 {
2569 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2570 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2571 }
2572 else
2573#endif
2574 {
2575 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2576 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2577 }
2578
2579 /* CS */
2580 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2581 Assert(pCtx->cs.u32Limit == 0xffff);
2582 Assert(u32CSAttr == 0xf3);
2583 /* SS */
2584 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2585 Assert(pCtx->ss.u32Limit == 0xffff);
2586 Assert(u32SSAttr == 0xf3);
2587 /* DS */
2588 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2589 Assert(pCtx->ds.u32Limit == 0xffff);
2590 Assert(u32DSAttr == 0xf3);
2591 /* ES */
2592 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2593 Assert(pCtx->es.u32Limit == 0xffff);
2594 Assert(u32ESAttr == 0xf3);
2595 /* FS */
2596 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2597 Assert(pCtx->fs.u32Limit == 0xffff);
2598 Assert(u32FSAttr == 0xf3);
2599 /* GS */
2600 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2601 Assert(pCtx->gs.u32Limit == 0xffff);
2602 Assert(u32GSAttr == 0xf3);
2603 /* 64-bit capable CPUs. */
2604 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2605 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2606 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2607 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2608 }
2609}
2610#endif /* VBOX_STRICT */
2611
2612
2613/**
2614 * Exports a guest segment register into the guest-state area in the VMCS.
2615 *
2616 * @returns VBox status code.
2617 * @param pVCpu The cross context virtual CPU structure.
2618 * @param pVmcsInfo The VMCS info. object.
2619 * @param iSegReg The segment register number (X86_SREG_XXX).
2620 * @param pSelReg Pointer to the segment selector.
2621 *
2622 * @remarks No-long-jump zone!!!
2623 */
2624static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2625{
2626 Assert(iSegReg < X86_SREG_COUNT);
2627
2628 uint32_t u32Access = pSelReg->Attr.u;
2629#ifndef IN_NEM_DARWIN
2630 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2631#endif
2632 {
2633 /*
2634         * The way to tell whether this is really a null selector or just a selector that was
2635         * loaded with 0 in real-mode is by the segment attributes. A selector loaded in
2636         * real-mode with the value 0 is valid and usable in protected-mode and we should
2637         * -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
2638         * NULL selectors loaded in protected-mode have their attributes as 0.
2639 */
2640 if (u32Access)
2641 { }
2642 else
2643 u32Access = X86DESCATTR_UNUSABLE;
2644 }
2645#ifndef IN_NEM_DARWIN
2646 else
2647 {
2648 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2649 u32Access = 0xf3;
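        /* 0xf3 decodes as: type 3 (read/write data, accessed), S=1 (code/data), DPL=3, P=1,
           i.e. an expand-up ring-3 data segment, which is what the virtual-8086 fallback
           needs for all six selectors. */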
2650 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2651 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2652 RT_NOREF_PV(pVCpu);
2653 }
2654#else
2655 RT_NOREF(pVmcsInfo);
2656#endif
2657
2658 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2659 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2660              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2661
2662 /*
2663 * Commit it to the VMCS.
2664 */
2665 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2666 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2667 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2668 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2669 return VINF_SUCCESS;
2670}
2671
2672
2673/**
2674 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2675 * area in the VMCS.
2676 *
2677 * @returns VBox status code.
2678 * @param pVCpu The cross context virtual CPU structure.
2679 * @param pVmxTransient The VMX-transient structure.
2680 *
2681 * @remarks Will import guest CR0 on strict builds during validation of
2682 * segments.
2683 * @remarks No-long-jump zone!!!
2684 */
2685static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2686{
2687 int rc = VERR_INTERNAL_ERROR_5;
2688#ifndef IN_NEM_DARWIN
2689 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2690#endif
2691 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2692 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2693#ifndef IN_NEM_DARWIN
2694 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2695#endif
2696
2697 /*
2698 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2699 */
2700 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2701 {
2702 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2703 {
2704 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2705#ifndef IN_NEM_DARWIN
2706 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2707 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2708#endif
2709 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2710 AssertRC(rc);
2711 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2712 }
2713
2714 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2715 {
2716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2717#ifndef IN_NEM_DARWIN
2718 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2719 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2720#endif
2721 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2722 AssertRC(rc);
2723 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2724 }
2725
2726 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2727 {
2728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2729#ifndef IN_NEM_DARWIN
2730 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2731 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2732#endif
2733 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2734 AssertRC(rc);
2735 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2736 }
2737
2738 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2739 {
2740 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2741#ifndef IN_NEM_DARWIN
2742 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2743 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2744#endif
2745 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2746 AssertRC(rc);
2747 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2748 }
2749
2750 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2751 {
2752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2753#ifndef IN_NEM_DARWIN
2754 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2755 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2756#endif
2757 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2758 AssertRC(rc);
2759 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2760 }
2761
2762 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2763 {
2764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2765#ifndef IN_NEM_DARWIN
2766 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2767 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2768#endif
2769 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2770 AssertRC(rc);
2771 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2772 }
2773
2774#ifdef VBOX_STRICT
2775 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2776#endif
2777 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2778 pCtx->cs.Attr.u));
2779 }
2780
2781 /*
2782 * Guest TR.
2783 */
2784 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2785 {
2786 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2787
2788 /*
2789 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2790 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2791 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2792 */
2793 uint16_t u16Sel;
2794 uint32_t u32Limit;
2795 uint64_t u64Base;
2796 uint32_t u32AccessRights;
2797#ifndef IN_NEM_DARWIN
2798 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2799#endif
2800 {
2801 u16Sel = pCtx->tr.Sel;
2802 u32Limit = pCtx->tr.u32Limit;
2803 u64Base = pCtx->tr.u64Base;
2804 u32AccessRights = pCtx->tr.Attr.u;
2805 }
2806#ifndef IN_NEM_DARWIN
2807 else
2808 {
2809 Assert(!pVmxTransient->fIsNestedGuest);
2810 Assert(pVM->hm.s.vmx.pRealModeTSS);
2811 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2812
2813 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2814 RTGCPHYS GCPhys;
2815 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2816 AssertRCReturn(rc, rc);
2817
2818 X86DESCATTR DescAttr;
2819 DescAttr.u = 0;
2820 DescAttr.n.u1Present = 1;
2821 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2822
2823 u16Sel = 0;
2824 u32Limit = HM_VTX_TSS_SIZE;
2825 u64Base = GCPhys;
2826 u32AccessRights = DescAttr.u;
2827 }
2828#endif
2829
2830 /* Validate. */
2831 Assert(!(u16Sel & RT_BIT(2)));
2832 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2833 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2834 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2835 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2836 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2837 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2838 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2839 Assert( (u32Limit & 0xfff) == 0xfff
2840 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2841 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2842 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2843
2844 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2845 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2846 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2847 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2848
2849 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2850 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2851 }
2852
2853 /*
2854 * Guest GDTR.
2855 */
2856 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2857 {
2858 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2859
2860 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2861 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2862
2863 /* Validate. */
2864 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2867 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2868 }
2869
2870 /*
2871 * Guest LDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2876
2877        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2878 uint32_t u32Access;
2879 if ( !pVmxTransient->fIsNestedGuest
2880 && !pCtx->ldtr.Attr.u)
2881 u32Access = X86DESCATTR_UNUSABLE;
2882 else
2883 u32Access = pCtx->ldtr.Attr.u;
2884
2885 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2886 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2887 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2888 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2889
2890 /* Validate. */
2891 if (!(u32Access & X86DESCATTR_UNUSABLE))
2892 {
2893 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2894 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2895 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2896 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2897 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2898 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2899 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2900 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2901 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2902 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2903 }
2904
2905 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2906 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2907 }
2908
2909 /*
2910 * Guest IDTR.
2911 */
2912 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2913 {
2914 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2915
2916 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2917 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2918
2919 /* Validate. */
2920 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2923 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2924 }
2925
2926 return VINF_SUCCESS;
2927}
2928
2929
2930/**
2931 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2932 * VM-exit interruption info type.
2933 *
2934 * @returns The IEM exception flags.
2935 * @param uVector The event vector.
2936 * @param uVmxEventType The VMX event type.
2937 *
2938 * @remarks This function currently only constructs flags required for
2939 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2940 * and CR2 aspects of an exception are not included).
2941 */
2942static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2943{
2944 uint32_t fIemXcptFlags;
2945 switch (uVmxEventType)
2946 {
2947 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2948 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2949 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2950 break;
2951
2952 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2953 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2954 break;
2955
2956 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2957 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2958 break;
2959
2960 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2961 {
2962 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2963 if (uVector == X86_XCPT_BP)
2964 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2965 else if (uVector == X86_XCPT_OF)
2966 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2967 else
2968 {
2969 fIemXcptFlags = 0;
2970 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2971 }
2972 break;
2973 }
2974
2975 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2976 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2977 break;
2978
2979 default:
2980 fIemXcptFlags = 0;
2981 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2982 break;
2983 }
2984 return fIemXcptFlags;
2985}
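/* For instance, a software breakpoint reported as VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT with
   vector X86_XCPT_BP maps to IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR above. */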
2986
2987
2988/**
2989 * Sets an event as a pending event to be injected into the guest.
2990 *
2991 * @param pVCpu The cross context virtual CPU structure.
2992 * @param u32IntInfo The VM-entry interruption-information field.
2993 * @param cbInstr The VM-entry instruction length in bytes (for
2994 * software interrupts, exceptions and privileged
2995 * software exceptions).
2996 * @param u32ErrCode The VM-entry exception error code.
2997 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
2998 * page-fault.
2999 */
3000DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3001 RTGCUINTPTR GCPtrFaultAddress)
3002{
3003 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3004 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3005 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3006 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3007 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3008 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3009}
3010
3011
3012/**
3013 * Sets an external interrupt as pending-for-injection into the VM.
3014 *
3015 * @param pVCpu The cross context virtual CPU structure.
3016 * @param u8Interrupt The external interrupt vector.
3017 */
3018DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3019{
3020 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3021 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3022 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3023 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3024 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3025}
3026
3027
3028/**
3029 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3030 *
3031 * @param pVCpu The cross context virtual CPU structure.
3032 */
3033DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3034{
3035 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3036 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3037 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3039 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3040}
3041
3042
3043/**
3044 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3045 *
3046 * @param pVCpu The cross context virtual CPU structure.
3047 */
3048DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3049{
3050 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3051 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3052 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3054 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3055}
3056
3057
3058/**
3059 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3060 *
3061 * @param pVCpu The cross context virtual CPU structure.
3062 */
3063DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3064{
3065 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3066 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3067 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3069 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3070}
3071
3072
3073/**
3074 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3075 *
3076 * @param pVCpu The cross context virtual CPU structure.
3077 */
3078DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3079{
3080 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3081 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3082 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3084 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3085}
3086
3087
3088#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3089/**
3090 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3091 *
3092 * @param pVCpu The cross context virtual CPU structure.
3093 * @param u32ErrCode The error code for the general-protection exception.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105/**
3106 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3107 *
3108 * @param pVCpu The cross context virtual CPU structure.
3109 * @param u32ErrCode The error code for the stack exception.
3110 */
3111DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3112{
3113 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3114 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3117 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3118}
3119#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
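/*
 * Usage sketch (illustrative only): an exit handler that decides to reflect a fault into the
 * guest queues it with one of the helpers above, e.g. vmxHCSetPendingXcptGP(pVCpu, 0) for a
 * general-protection fault with a zero error code; the run loop then injects the pending
 * event before the next VM-entry.
 */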
3120
3121
3122/**
3123 * Fixes up attributes for the specified segment register.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param pSelReg The segment register that needs fixing.
3127 * @param pszRegName The register name (for logging and assertions).
3128 */
3129static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3130{
3131 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3132
3133 /*
3134 * If VT-x marks the segment as unusable, most other bits remain undefined:
3135 * - For CS the L, D and G bits have meaning.
3136 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3137 * - For the remaining data segments no bits are defined.
3138 *
3139 * The present bit and the unusable bit have been observed to be set at the
3140 * same time (the selector was supposed to be invalid as we started executing
3141 * a V8086 interrupt in ring-0).
3142 *
3143 * What should be important for the rest of the VBox code, is that the P bit is
3144 * cleared. Some of the other VBox code recognizes the unusable bit, but
3145 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3146 * safe side here, we'll strip off P and other bits we don't care about. If
3147 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3148 *
3149 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3150 */
3151#ifdef VBOX_STRICT
3152 uint32_t const uAttr = pSelReg->Attr.u;
3153#endif
3154
3155 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3156 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3157 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
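 /* After this, only the unusable, L, D/B, G, DPL, S and 4-bit type fields can be non-zero;
    in particular the P bit now reads as zero for unusable selectors. */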
3158
3159#ifdef VBOX_STRICT
3160# ifndef IN_NEM_DARWIN
3161 VMMRZCallRing3Disable(pVCpu);
3162# endif
3163 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3164# ifdef DEBUG_bird
3165 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3166 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3167 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3168# endif
3169# ifndef IN_NEM_DARWIN
3170 VMMRZCallRing3Enable(pVCpu);
3171# endif
3172 NOREF(uAttr);
3173#endif
3174 RT_NOREF2(pVCpu, pszRegName);
3175}
3176
3177
3178/**
3179 * Imports a guest segment register from the current VMCS into the guest-CPU
3180 * context.
3181 *
3182 * @param pVCpu The cross context virtual CPU structure.
3183 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3184 *
3185 * @remarks Called with interrupts and/or preemption disabled.
3186 */
3187template<uint32_t const a_iSegReg>
3188DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3189{
3190 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3191 /* Check that the macros we depend upon here and in the export partner function work: */
3192#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3193 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3194 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3195 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3196 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3197 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3198 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3199 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3200 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3201 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3202 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3203
3204 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3205
3206 uint16_t u16Sel;
3207 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3208 pSelReg->Sel = u16Sel;
3209 pSelReg->ValidSel = u16Sel;
3210
3211 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3212 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3213
3214 uint32_t u32Attr;
3215 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3216 pSelReg->Attr.u = u32Attr;
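 /* Note: the register name passed to the fixup below comes from a packed string table
    ("ES\0CS\0SS\0DS\0FS\0GS") with one 3-byte entry per segment, so a_iSegReg * 3 selects
    the matching NUL-terminated name. */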
3217 if (u32Attr & X86DESCATTR_UNUSABLE)
3218 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3219
3220 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3221}
3222
3223
3224/**
3225 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3226 *
3227 * @param pVCpu The cross context virtual CPU structure.
3228 *
3229 * @remarks Called with interrupts and/or preemption disabled.
3230 */
3231DECLINLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3232{
3233 uint16_t u16Sel;
3234 uint64_t u64Base;
3235 uint32_t u32Limit, u32Attr;
3236 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3237 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3238 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3239 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3240
3241 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3242 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3243 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3244 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3245 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3246 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3247 if (u32Attr & X86DESCATTR_UNUSABLE)
3248 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3249}
3250
3251
3252/**
3253 * Imports the guest TR from the current VMCS into the guest-CPU context.
3254 *
3255 * @param pVCpu The cross context virtual CPU structure.
3256 *
3257 * @remarks Called with interrupts and/or preemption disabled.
3258 */
3259DECLINLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3260{
3261 uint16_t u16Sel;
3262 uint64_t u64Base;
3263 uint32_t u32Limit, u32Attr;
3264 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3265 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3266 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3267 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3268
3269 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3270 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3271 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3272 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3273 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3274 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3275 /* TR is the only selector that can never be unusable. */
3276 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3277}
3278
3279
3280/**
3281 * Core: Imports the guest RIP from the VMCS back into the guest-CPU context.
3282 *
3283 * @returns The RIP value.
3284 * @param pVCpu The cross context virtual CPU structure.
3285 *
3286 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3287 * @remarks Do -not- call this function directly!
3288 */
3289DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3290{
3291 uint64_t u64Val;
3292 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3293 AssertRC(rc);
3294
3295 pVCpu->cpum.GstCtx.rip = u64Val;
3296
3297 return u64Val;
3298}
3299
3300
3301/**
3302 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3303 *
3304 * @param pVCpu The cross context virtual CPU structure.
3305 *
3306 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3307 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3308 * instead!!!
3309 */
3310DECLINLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3311{
3312 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3313 {
3314 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3315 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3316 }
3317}
3318
3319
3320/**
3321 * Core: Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3322 *
3323 * @param pVCpu The cross context virtual CPU structure.
3324 * @param pVmcsInfo The VMCS info. object.
3325 *
3326 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3327 * @remarks Do -not- call this function directly!
3328 */
3329DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3330{
3331 uint64_t u64Val;
3332 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3333 AssertRC(rc);
3334
3335 pVCpu->cpum.GstCtx.rflags.u64 = u64Val;
3336#ifndef IN_NEM_DARWIN
3337 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3338 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3339 {
3340 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3341 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3342 }
3343#else
3344 RT_NOREF(pVmcsInfo);
3345#endif
3346}
3347
3348
3349/**
3350 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3351 *
3352 * @param pVCpu The cross context virtual CPU structure.
3353 * @param pVmcsInfo The VMCS info. object.
3354 *
3355 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3356 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3357 * instead!!!
3358 */
3359DECLINLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3360{
3361 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3362 {
3363 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3364 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3365 }
3366}
3367
3368
3369/**
3370 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3371 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3372 */
3373DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3374{
3375 /*
3376 * We must import RIP here to set our EM interrupt-inhibited state.
3377 * We also import RFLAGS as our code that evaluates pending interrupts
3378 * before VM-entry requires it.
3379 */
3380 vmxHCImportGuestRip(pVCpu);
3381 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3382
3383 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3384 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3385 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3386 pVCpu->cpum.GstCtx.rip);
3387 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3388}
3389
3390
3391/**
3392 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3393 * context.
3394 *
3395 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3396 *
3397 * @param pVCpu The cross context virtual CPU structure.
3398 * @param pVmcsInfo The VMCS info. object.
3399 *
3400 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3401 * do not log!
3402 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3403 * instead!!!
3404 */
3405DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3406{
3407 uint32_t u32Val;
3408 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3409 if (!u32Val)
3410 {
3411 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3412 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3413 }
3414 else
3415 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3416}
3417
3418
3419/**
3420 * Worker for VMXR0ImportStateOnDemand.
3421 *
3422 * @returns VBox status code.
3423 * @param pVCpu The cross context virtual CPU structure.
3424 * @param pVmcsInfo The VMCS info. object.
3425 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3426 */
3427static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3428{
3429 int rc = VINF_SUCCESS;
3430 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3431 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3432 uint32_t u32Val;
3433
3434 /*
3435 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3436 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3437 * neither are other host platforms.
3438 *
3439 * Committing this temporarily as it prevents the BSOD.
3440 *
3441 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3442 */
3443#ifdef RT_OS_WINDOWS
3444 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3445 return VERR_HM_IPE_1;
3446#endif
3447
3448 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3449
3450#ifndef IN_NEM_DARWIN
3451 /*
3452 * We disable interrupts to make the updating of the state and in particular
3453 * the fExtrn modification atomic wrt to preemption hooks.
3454 */
3455 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3456#endif
3457
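 /* Only bits that are still marked as not-yet-imported in fExtrn actually need to be
    fetched from the VMCS; everything else is already up to date in the guest context. */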
3458 fWhat &= pCtx->fExtrn;
3459 if (fWhat)
3460 {
3461 do
3462 {
3463 if (fWhat & CPUMCTX_EXTRN_RIP)
3464 vmxHCImportGuestRip(pVCpu);
3465
3466 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3467 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3468
3469 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3470 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3471 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3472
3473 if (fWhat & CPUMCTX_EXTRN_RSP)
3474 {
3475 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3476 AssertRC(rc);
3477 }
3478
3479 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3480 {
3481 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3482#ifndef IN_NEM_DARWIN
3483 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3484#else
3485 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3486#endif
3487 if (fWhat & CPUMCTX_EXTRN_CS)
3488 {
3489 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3490 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3491 if (fRealOnV86Active)
3492 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3493 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3494 }
3495 if (fWhat & CPUMCTX_EXTRN_SS)
3496 {
3497 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3498 if (fRealOnV86Active)
3499 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3500 }
3501 if (fWhat & CPUMCTX_EXTRN_DS)
3502 {
3503 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3504 if (fRealOnV86Active)
3505 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3506 }
3507 if (fWhat & CPUMCTX_EXTRN_ES)
3508 {
3509 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3510 if (fRealOnV86Active)
3511 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3512 }
3513 if (fWhat & CPUMCTX_EXTRN_FS)
3514 {
3515 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3516 if (fRealOnV86Active)
3517 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3518 }
3519 if (fWhat & CPUMCTX_EXTRN_GS)
3520 {
3521 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3522 if (fRealOnV86Active)
3523 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3524 }
3525 }
3526
3527 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3528 {
3529 if (fWhat & CPUMCTX_EXTRN_LDTR)
3530 vmxHCImportGuestLdtr(pVCpu);
3531
3532 if (fWhat & CPUMCTX_EXTRN_GDTR)
3533 {
3534 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3535 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3536 pCtx->gdtr.cbGdt = u32Val;
3537 }
3538
3539 /* Guest IDTR. */
3540 if (fWhat & CPUMCTX_EXTRN_IDTR)
3541 {
3542 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3543 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3544 pCtx->idtr.cbIdt = u32Val;
3545 }
3546
3547 /* Guest TR. */
3548 if (fWhat & CPUMCTX_EXTRN_TR)
3549 {
3550#ifndef IN_NEM_DARWIN
3551 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3552 so we don't need to import that one. */
3553 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3554#endif
3555 vmxHCImportGuestTr(pVCpu);
3556 }
3557 }
3558
3559 if (fWhat & CPUMCTX_EXTRN_DR7)
3560 {
3561#ifndef IN_NEM_DARWIN
3562 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3563#endif
3564 {
3565 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3566 AssertRC(rc);
3567 }
3568 }
3569
3570 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3571 {
3572 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3573 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3574 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3575 pCtx->SysEnter.cs = u32Val;
3576 }
3577
3578#ifndef IN_NEM_DARWIN
3579 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3580 {
3581 if ( pVM->hmr0.s.fAllow64BitGuests
3582 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3583 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3584 }
3585
3586 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3587 {
3588 if ( pVM->hmr0.s.fAllow64BitGuests
3589 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3590 {
3591 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3592 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3593 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3594 }
3595 }
3596
3597 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3598 {
3599 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3600 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3601 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3602 Assert(pMsrs);
3603 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3604 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
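 /* Walk the VM-exit MSR-store area and copy each captured value back into the guest
    context (TSC_AUX, SPEC_CTRL) or, for LBR MSRs, into the shared VMCS-info shadow. */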
3605 for (uint32_t i = 0; i < cMsrs; i++)
3606 {
3607 uint32_t const idMsr = pMsrs[i].u32Msr;
3608 switch (idMsr)
3609 {
3610 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3611 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3612 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3613 default:
3614 {
3615 uint32_t idxLbrMsr;
3616 if (VM_IS_VMX_LBR(pVM))
3617 {
3618 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3619 {
3620 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3621 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3622 break;
3623 }
3624 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3625 {
3626 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3627 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3628 break;
3629 }
3630 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3631 {
3632 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3633 break;
3634 }
3635 /* Fallthru (no break) */
3636 }
3637 pCtx->fExtrn = 0;
3638 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3639 ASMSetFlags(fEFlags);
3640 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3641 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3642 }
3643 }
3644 }
3645 }
3646#endif
3647
3648 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3649 {
3650 if (fWhat & CPUMCTX_EXTRN_CR0)
3651 {
3652 uint64_t u64Cr0;
3653 uint64_t u64Shadow;
3654 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3655 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3656#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3657 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3658 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3659#else
3660 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3661 {
3662 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3663 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3664 }
3665 else
3666 {
3667 /*
3668 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3669 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3670 * re-construct CR0. See @bugref{9180#c95} for details.
3671 */
3672 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3673 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3674 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3675 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3676 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3677 }
3678#endif
3679#ifndef IN_NEM_DARWIN
3680 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3681#endif
3682 CPUMSetGuestCR0(pVCpu, u64Cr0);
3683#ifndef IN_NEM_DARWIN
3684 VMMRZCallRing3Enable(pVCpu);
3685#endif
3686 }
3687
3688 if (fWhat & CPUMCTX_EXTRN_CR4)
3689 {
3690 uint64_t u64Cr4;
3691 uint64_t u64Shadow;
3692 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3693 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3694#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3695 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3696 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3697#else
3698 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3699 {
3700 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3701 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3702 }
3703 else
3704 {
3705 /*
3706 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3707 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3708 * re-construct CR4. See @bugref{9180#c95} for details.
3709 */
3710 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3711 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3712 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3713 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3714 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3715 }
3716#endif
3717 pCtx->cr4 = u64Cr4;
3718 }
3719
3720 if (fWhat & CPUMCTX_EXTRN_CR3)
3721 {
3722 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3723 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3724 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3725 && CPUMIsGuestPagingEnabledEx(pCtx)))
3726 {
3727 uint64_t u64Cr3;
3728 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3729 if (pCtx->cr3 != u64Cr3)
3730 {
3731 pCtx->cr3 = u64Cr3;
3732 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3733 }
3734
3735 /*
3736 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3737 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3738 */
3739 if (CPUMIsGuestInPAEModeEx(pCtx))
3740 {
3741 X86PDPE aPaePdpes[4];
3742 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3744 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3746 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3747 {
3748 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3749 /* PGM now updates PAE PDPTEs while updating CR3. */
3750 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3751 }
3752 }
3753 }
3754 }
3755 }
3756
3757#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3758 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3759 {
3760 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3761 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3762 {
3763 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3764 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3765 if (RT_SUCCESS(rc))
3766 { /* likely */ }
3767 else
3768 break;
3769 }
3770 }
3771#endif
3772 } while (0);
3773
3774 if (RT_SUCCESS(rc))
3775 {
3776 /* Update fExtrn. */
3777 pCtx->fExtrn &= ~fWhat;
3778
3779 /* If everything has been imported, clear the HM keeper bit. */
3780 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3781 {
3782#ifndef IN_NEM_DARWIN
3783 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3784#else
3785 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3786#endif
3787 Assert(!pCtx->fExtrn);
3788 }
3789 }
3790 }
3791#ifndef IN_NEM_DARWIN
3792 else
3793 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3794
3795 /*
3796 * Restore interrupts.
3797 */
3798 ASMSetFlags(fEFlags);
3799#endif
3800
3801 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3802
3803 if (RT_SUCCESS(rc))
3804 { /* likely */ }
3805 else
3806 return rc;
3807
3808 /*
3809 * Honor any pending CR3 updates.
3810 *
3811 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3812 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3813 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3814 *
3815 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3816 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3817 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3818 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3819 *
3820 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3821 *
3822 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3823 */
3824 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3825#ifndef IN_NEM_DARWIN
3826 && VMMRZCallRing3IsEnabled(pVCpu)
3827#endif
3828 )
3829 {
3830 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3831 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3832 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3833 }
3834
3835 return VINF_SUCCESS;
3836}
3837
3838
3839/**
3840 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3841 *
3842 * @returns VBox status code.
3843 * @param pVCpu The cross context virtual CPU structure.
3844 * @param pVmcsInfo The VMCS info. object.
3845 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3846 * in NEM/darwin context.
3847 * @tparam a_fWhat What to import, zero or more bits from
3848 * HMVMX_CPUMCTX_EXTRN_ALL.
3849 */
3850template<uint64_t const a_fWhat>
3851static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3852{
3853 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3854 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3855 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3856 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3857
3858 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3859
3860 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3861
3862 /* RIP and RFLAGS may have been imported already by the post exit code
3863 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so the fExtrn
3864 check below may end up skipping this block. */
3865 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3866 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3867 {
3868 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3869 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3870
3871 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3872 {
3873 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3874 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3875 else
3876 vmxHCImportGuestCoreRip(pVCpu);
3877 }
3878 }
3879
3880 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3881 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3882 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3883
3884 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3885 {
3886 if (a_fWhat & CPUMCTX_EXTRN_CS)
3887 {
3888 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3889 /** @todo try to get rid of this carp, it smells and is probably never ever
3890 * used: */
3891 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3892 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3893 {
3894 vmxHCImportGuestCoreRip(pVCpu);
3895 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3896 }
3897 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3898 }
3899 if (a_fWhat & CPUMCTX_EXTRN_SS)
3900 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3901 if (a_fWhat & CPUMCTX_EXTRN_DS)
3902 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3903 if (a_fWhat & CPUMCTX_EXTRN_ES)
3904 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3905 if (a_fWhat & CPUMCTX_EXTRN_FS)
3906 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3907 if (a_fWhat & CPUMCTX_EXTRN_GS)
3908 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3909
3910 /* Guest TR.
3911 Real-mode emulation using virtual-8086 mode has the fake TSS
3912 (pRealModeTSS) in TR, so we don't need to import that one. */
3913#ifndef IN_NEM_DARWIN
3914 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3915 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3916 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3917#else
3918 if (a_fWhat & CPUMCTX_EXTRN_TR)
3919#endif
3920 vmxHCImportGuestTr(pVCpu);
3921
3922#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
3923 if (fRealOnV86Active)
3924 {
3925 if (a_fWhat & CPUMCTX_EXTRN_CS)
3926 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3927 if (a_fWhat & CPUMCTX_EXTRN_SS)
3928 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3929 if (a_fWhat & CPUMCTX_EXTRN_DS)
3930 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3931 if (a_fWhat & CPUMCTX_EXTRN_ES)
3932 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3933 if (a_fWhat & CPUMCTX_EXTRN_FS)
3934 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3935 if (a_fWhat & CPUMCTX_EXTRN_GS)
3936 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3937 }
3938#endif
3939 }
3940
3941 if (a_fWhat & CPUMCTX_EXTRN_RSP)
3942 {
3943 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
3944 AssertRC(rc);
3945 }
3946
3947 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
3948 vmxHCImportGuestLdtr(pVCpu);
3949
3950 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
3951 {
3952 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
3953 uint32_t u32Val;
3954 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
3955 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
3956 }
3957
3958 /* Guest IDTR. */
3959 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
3960 {
3961 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
3962 uint32_t u32Val;
3963 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
3964 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
3965 }
3966
3967 if (a_fWhat & CPUMCTX_EXTRN_DR7)
3968 {
3969#ifndef IN_NEM_DARWIN
3970 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3971#endif
3972 {
3973 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
3974 AssertRC(rc);
3975 }
3976 }
3977
3978 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3979 {
3980 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
3981 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
3982 uint32_t u32Val;
3983 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
3984 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
3985 }
3986
3987#ifndef IN_NEM_DARWIN
3988 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3989 {
3990 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
3991 && pVM->hmr0.s.fAllow64BitGuests)
3992 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3993 }
3994
3995 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3996 {
3997 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
3998 && pVM->hmr0.s.fAllow64BitGuests)
3999 {
4000 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4001 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4002 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4003 }
4004 }
4005
4006 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4007 {
4008 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4009 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
4010 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
4011 Assert(pMsrs);
4012 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
4013 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
4014 for (uint32_t i = 0; i < cMsrs; i++)
4015 {
4016 uint32_t const idMsr = pMsrs[i].u32Msr;
4017 switch (idMsr)
4018 {
4019 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
4020 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
4021 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
4022 default:
4023 {
4024 uint32_t idxLbrMsr;
4025 if (VM_IS_VMX_LBR(pVM))
4026 {
4027 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
4028 {
4029 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4030 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4031 break;
4032 }
4033 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
4034 {
4035 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
4036 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
4037 break;
4038 }
4039 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
4040 {
4041 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
4042 break;
4043 }
4044 }
4045 pVCpu->cpum.GstCtx.fExtrn = 0;
4046 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
4047 ASMSetFlags(fEFlags);
4048 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
4049 return VERR_HM_UNEXPECTED_LD_ST_MSR;
4050 }
4051 }
4052 }
4053 }
4054#endif
4055
4056 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4057 {
4058 uint64_t u64Cr0;
4059 uint64_t u64Shadow;
4060 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc1);
4061 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4062#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4063 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4064 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4065#else
4066 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4067 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4068 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
4069 else
4070 {
4071 /*
4072 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
4073 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4074 * re-construct CR0. See @bugref{9180#c95} for details.
4075 */
4076 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4077 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4078 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
4079 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
4080 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
4081 }
4082#endif
4083#ifndef IN_NEM_DARWIN
4084 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
4085#endif
4086 CPUMSetGuestCR0(pVCpu, u64Cr0);
4087#ifndef IN_NEM_DARWIN
4088 VMMRZCallRing3Enable(pVCpu);
4089#endif
4090 }
4091
4092 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4093 {
4094 uint64_t u64Cr4;
4095 uint64_t u64Shadow;
4096 int rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc1);
4097 int rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc2);
4098#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4099 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4100 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4101#else
4102 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4103 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4104 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
4105 else
4106 {
4107 /*
4108 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
4109 * the nested-guest using hardware-assisted VMX. Accordingly we need to
4110 * re-construct CR4. See @bugref{9180#c95} for details.
4111 */
4112 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
4113 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4114 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
4115 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
4116 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
4117 }
4118#endif
4119 pVCpu->cpum.GstCtx.cr4 = u64Cr4;
4120 }
4121
4122 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4123 {
4124 /* CR0.PG bit changes are always intercepted, so it's up to date. */
4125 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
4126 || ( VM_IS_VMX_NESTED_PAGING(pVM)
4127 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)))
4128 {
4129 uint64_t u64Cr3;
4130 int const rc0 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc0);
4131 if (pVCpu->cpum.GstCtx.cr3 != u64Cr3)
4132 {
4133 pVCpu->cpum.GstCtx.cr3 = u64Cr3;
4134 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4135 }
4136
4137 /*
4138 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
4139 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
4140 */
4141 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
4142 {
4143 X86PDPE aPaePdpes[4];
4144 int const rc1 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc1);
4145 int const rc2 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc2);
4146 int const rc3 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc3);
4147 int const rc4 = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc4);
4148 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
4149 {
4150 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
4151 /* PGM now updates PAE PDPTEs while updating CR3. */
4152 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
4153 }
4154 }
4155 }
4156 }
4157
4158#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4159 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4160 {
4161 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4162 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4163 {
4164 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4165 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4166 AssertRCReturn(rc, rc);
4167 }
4168 }
4169#endif
4170
4171 /* Update fExtrn. */
4172 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4173
4174 /* If everything has been imported, clear the HM keeper bit. */
4175 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4176 {
4177#ifndef IN_NEM_DARWIN
4178 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4179#else
4180 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4181#endif
4182 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4183 }
4184
4185 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4186
4187 /*
4188 * Honor any pending CR3 updates.
4189 *
4190 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4191 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4192 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4193 *
4194 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4195 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4196 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4197 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4198 *
4199 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4200 *
4201 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4202 */
4203#ifndef IN_NEM_DARWIN
4204 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4205 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4206 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4207 return VINF_SUCCESS;
4208 ASMSetFlags(fEFlags);
4209#else
4210 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4211 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4212 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4213 return VINF_SUCCESS;
4214 RT_NOREF_PV(fEFlags);
4215#endif
4216
4217 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4218 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4220 return VINF_SUCCESS;
4221}
4222
4223
4224/**
4225 * Internal state fetcher.
4226 *
4227 * @returns VBox status code.
4228 * @param pVCpu The cross context virtual CPU structure.
4229 * @param pVmcsInfo The VMCS info. object.
4230 * @param pszCaller For logging.
4231 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4232 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4233 * already. This is ORed together with @a a_fWhat when
4234 * calculating what needs fetching (just for safety).
4235 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4236 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4237 * already. This is ORed together with @a a_fWhat when
4238 * calculating what needs fetching (just for safety).
4239 */
4240template<uint64_t const a_fWhat,
4241 uint64_t const a_fDoneLocal = 0,
4242 uint64_t const a_fDonePostExit = 0
4243#ifndef IN_NEM_DARWIN
4244 | CPUMCTX_EXTRN_INHIBIT_INT
4245 | CPUMCTX_EXTRN_INHIBIT_NMI
4246# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4247 | HMVMX_CPUMCTX_EXTRN_ALL
4248# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4249 | CPUMCTX_EXTRN_RFLAGS
4250# endif
4251#else /* IN_NEM_DARWIN */
4252 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4253#endif /* IN_NEM_DARWIN */
4254>
4255DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4256{
4257 RT_NOREF_PV(pszCaller);
4258 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4259 {
4260#ifndef IN_NEM_DARWIN
4261 /*
4262 * We disable interrupts to make the updating of the state and in particular
4263 * the fExtrn modification atomic wrt to preemption hooks.
4264 */
4265 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4266#else
4267 RTCCUINTREG const fEFlags = 0;
4268#endif
4269
4270 /*
4271 * We combine all three parameters and take the (probably) inlined optimized
4272 * code path for the new things specified in a_fWhat.
4273 *
4274 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4275 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4276 * also take the streamlined path when both of these are cleared in fExtrn
4277 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4278 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4279 */
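 /* Illustrative example: with a_fWhat = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RSP
    and everything in a_fDoneLocal / a_fDonePostExit already imported, fWhatToDo is either that
    full mask (nothing fetched yet) or just CPUMCTX_EXTRN_RSP (RIP and RFLAGS already pulled in
    by the interruptibility import); both cases take the inlined inner path below. */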
4280 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4281 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4282 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4283 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4284 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4285 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4286 {
4287 int const rc = vmxHCImportGuestStateInner< a_fWhat
4288 & HMVMX_CPUMCTX_EXTRN_ALL
4289 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4290#ifndef IN_NEM_DARWIN
4291 ASMSetFlags(fEFlags);
4292#endif
4293 return rc;
4294 }
4295
4296#ifndef IN_NEM_DARWIN
4297 ASMSetFlags(fEFlags);
4298#endif
4299
4300 /*
4301 * We shouldn't normally get here, but it may happen when executing
4302 * in the debug run-loops. Typically, everything should already have
4303 * been fetched then. Otherwise call the fallback state import function.
4304 */
4305 if (fWhatToDo == 0)
4306 { /* hope the cause was the debug loop or something similar */ }
4307 else
4308 {
4309 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4310 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4311 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4312 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4313 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4314 }
4315 }
4316 return VINF_SUCCESS;
4317}
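
/*
 * Usage sketch (illustrative): exit handlers typically instantiate this with the state bits
 * they are about to touch, along the lines of
 *     int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
 * so that only the still-external parts of the guest context are read back from the VMCS.
 */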
4318
4319
4320/**
4321 * Check per-VM and per-VCPU force flag actions that require us to go back to
4322 * ring-3 for one reason or another.
4323 *
4324 * @returns Strict VBox status code (i.e. informational status codes too)
4325 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4326 * ring-3.
4327 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4328 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4329 * interrupts)
4330 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4331 * all EMTs to be in ring-3.
4332 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4333 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4334 * to the EM loop.
4335 *
4336 * @param pVCpu The cross context virtual CPU structure.
4337 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4338 * @param fStepping Whether we are single-stepping the guest using the
4339 * hypervisor debugger.
4340 *
4341 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4342 * is no longer in VMX non-root mode.
4343 */
4344static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4345{
4346#ifndef IN_NEM_DARWIN
4347 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4348#endif
4349
4350 /*
4351 * Update pending interrupts into the APIC's IRR.
4352 */
4353 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4354 APICUpdatePendingInterrupts(pVCpu);
4355
4356 /*
4357 * Anything pending? Should be more likely than not if we're doing a good job.
4358 */
4359 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4360 if ( !fStepping
4361 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4362 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4363 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4364 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4365 return VINF_SUCCESS;
4366
4367 /* Pending PGM CR3 sync. */
4368 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4369 {
4370 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4371 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4372 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4373 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4374 if (rcStrict != VINF_SUCCESS)
4375 {
4376 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4377 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4378 return rcStrict;
4379 }
4380 }
4381
4382 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4383 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4384 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4385 {
4386 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4387 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4388 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4389 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4390 return rc;
4391 }
4392
4393 /* Pending VM request packets, such as hardware interrupts. */
4394 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4395 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4396 {
4397 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4398 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4399 return VINF_EM_PENDING_REQUEST;
4400 }
4401
4402 /* Pending PGM pool flushes. */
4403 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4404 {
4405 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4406 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4407 return VINF_PGM_POOL_FLUSH_PENDING;
4408 }
4409
4410 /* Pending DMA requests. */
4411 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4412 {
4413 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4414 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4415 return VINF_EM_RAW_TO_R3;
4416 }
4417
4418#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4419 /*
4420 * Pending nested-guest events.
4421 *
4422     * Please note that the priority of these events is specified and important.
4423 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4424 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4425 */
4426 if (fIsNestedGuest)
4427 {
4428 /* Pending nested-guest APIC-write. */
4429 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4430 {
4431 Log4Func(("Pending nested-guest APIC-write\n"));
4432 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4433 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4434 return rcStrict;
4435 }
4436
4437 /* Pending nested-guest monitor-trap flag (MTF). */
4438 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4439 {
4440 Log4Func(("Pending nested-guest MTF\n"));
4441 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4442 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4443 return rcStrict;
4444 }
4445
4446 /* Pending nested-guest VMX-preemption timer expired. */
4447 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4448 {
4449 Log4Func(("Pending nested-guest preempt timer\n"));
4450 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4451 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4452 return rcStrict;
4453 }
4454 }
4455#else
4456 NOREF(fIsNestedGuest);
4457#endif
4458
4459 return VINF_SUCCESS;
4460}
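/*
 * Illustrative sketch (hypothetical caller, simplified): a typical run-loop
 * consumer of vmxHCCheckForceFlags() only continues towards VM-entry when it
 * gets VINF_SUCCESS back; anything else is propagated so EM/ring-3 can act on it:
 *
 *     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, fIsNestedGuest, fStepping);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;   // e.g. VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3, VINF_EM_NO_MEMORY, ...
 *     // ...evaluate/inject pending events and enter the guest...
 */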
4461
4462
4463/**
4464 * Converts any TRPM trap into a pending HM event. This is typically used when
4465 * entering from ring-3 (not longjmp returns).
4466 *
4467 * @param pVCpu The cross context virtual CPU structure.
4468 */
4469static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4470{
4471 Assert(TRPMHasTrap(pVCpu));
4472 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4473
4474 uint8_t uVector;
4475 TRPMEVENT enmTrpmEvent;
4476 uint32_t uErrCode;
4477 RTGCUINTPTR GCPtrFaultAddress;
4478 uint8_t cbInstr;
4479 bool fIcebp;
4480
4481 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4482 AssertRC(rc);
4483
4484 uint32_t u32IntInfo;
4485 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4486 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4487
4488 rc = TRPMResetTrap(pVCpu);
4489 AssertRC(rc);
4490 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4491 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4492
4493 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4494}
4495
4496
4497/**
4498 * Converts the pending HM event into a TRPM trap.
4499 *
4500 * @param pVCpu The cross context virtual CPU structure.
4501 */
4502static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4503{
4504 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4505
4506 /* If a trap was already pending, we did something wrong! */
4507 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4508
4509 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4510 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4511 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4512
4513 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4514
4515 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4516 AssertRC(rc);
4517
4518 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4519 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4520
4521 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4522 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4523 else
4524 {
4525 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4526 switch (uVectorType)
4527 {
4528 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4529 TRPMSetTrapDueToIcebp(pVCpu);
4530 RT_FALL_THRU();
4531 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4532 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4533 {
4534 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4535 || ( uVector == X86_XCPT_BP /* INT3 */
4536 || uVector == X86_XCPT_OF /* INTO */
4537 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4538 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4539 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4540 break;
4541 }
4542 }
4543 }
4544
4545 /* We're now done converting the pending event. */
4546 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4547}
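/*
 * Illustrative sketch of how the two conversion helpers above are paired
 * (hypothetical, simplified control flow; the helpers and the fields they
 * assert on are the real ones):
 *
 *     if (TRPMHasTrap(pVCpu))                        // coming in from ring-3
 *         vmxHCTrpmTrapToPendingEvent(pVCpu);
 *     // ...hardware-assisted execution...
 *     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)     // going back to ring-3
 *         vmxHCPendingEventToTrpmTrap(pVCpu);
 */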
4548
4549
4550/**
4551 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4552 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4553 *
4554 * @param pVCpu The cross context virtual CPU structure.
4555 * @param pVmcsInfo The VMCS info. object.
4556 */
4557static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4558{
4559 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4560 {
4561 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4562 {
4563 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4564 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4565 AssertRC(rc);
4566 }
4567    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4568}
4569
4570
4571/**
4572 * Clears the interrupt-window exiting control in the VMCS.
4573 *
4574 * @param pVCpu The cross context virtual CPU structure.
4575 * @param pVmcsInfo The VMCS info. object.
4576 */
4577DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4578{
4579 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4580 {
4581 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4582 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4583 AssertRC(rc);
4584 }
4585}
4586
4587
4588/**
4589 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4590 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4591 *
4592 * @param pVCpu The cross context virtual CPU structure.
4593 * @param pVmcsInfo The VMCS info. object.
4594 */
4595static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4596{
4597 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4598 {
4599 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4600 {
4601 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4602 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4603 AssertRC(rc);
4604 Log4Func(("Setup NMI-window exiting\n"));
4605 }
4606 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4607}
4608
4609
4610/**
4611 * Clears the NMI-window exiting control in the VMCS.
4612 *
4613 * @param pVCpu The cross context virtual CPU structure.
4614 * @param pVmcsInfo The VMCS info. object.
4615 */
4616DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4617{
4618 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4619 {
4620 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4621 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4622 AssertRC(rc);
4623 }
4624}
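/*
 * The four window-exiting helpers above share one read-modify-write pattern on
 * the cached processor-based VM-execution controls; a minimal sketch of that
 * pattern for an arbitrary control bit (a_fCtl is a placeholder):
 *
 *     if (!(pVmcsInfo->u32ProcCtls & a_fCtl))
 *     {
 *         pVmcsInfo->u32ProcCtls |= a_fCtl;
 *         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
 *         AssertRC(rc);
 *     }
 */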
4625
4626
4627/**
4628 * Injects an event into the guest upon VM-entry by updating the relevant fields
4629 * in the VM-entry area in the VMCS.
4630 *
4631 * @returns Strict VBox status code (i.e. informational status codes too).
4632 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4633 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4634 *
4635 * @param pVCpu The cross context virtual CPU structure.
4636 * @param pVmcsInfo The VMCS info object.
4637 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4638 * @param pEvent The event being injected.
4639 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4640 * will be updated if necessary. This cannot be NULL.
4641 * @param fStepping Whether we're single-stepping guest execution and should
4642 * return VINF_EM_DBG_STEPPED if the event is injected
4643 * directly (registers modified by us, not by hardware on
4644 * VM-entry).
4645 */
4646static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4647 bool fStepping, uint32_t *pfIntrState)
4648{
4649 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4650 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4651 Assert(pfIntrState);
4652
4653#ifdef IN_NEM_DARWIN
4654 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4655#endif
4656
4657 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4658 uint32_t u32IntInfo = pEvent->u64IntInfo;
4659 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4660 uint32_t const cbInstr = pEvent->cbInstr;
4661 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4662 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4663 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4664
4665#ifdef VBOX_STRICT
4666 /*
4667 * Validate the error-code-valid bit for hardware exceptions.
4668 * No error codes for exceptions in real-mode.
4669 *
4670 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4671 */
4672 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4673 && !CPUMIsGuestInRealModeEx(pCtx))
4674 {
4675 switch (uVector)
4676 {
4677 case X86_XCPT_PF:
4678 case X86_XCPT_DF:
4679 case X86_XCPT_TS:
4680 case X86_XCPT_NP:
4681 case X86_XCPT_SS:
4682 case X86_XCPT_GP:
4683 case X86_XCPT_AC:
4684 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4685 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4686 RT_FALL_THRU();
4687 default:
4688 break;
4689 }
4690 }
4691
4692 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4693 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4694 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4695#endif
4696
4697 RT_NOREF(uVector);
4698 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4699 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4700 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4701 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4702 {
4703 Assert(uVector <= X86_XCPT_LAST);
4704 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4705 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4706 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4707 }
4708 else
4709 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4710
4711 /*
4712 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4713 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4714 * interrupt handler in the (real-mode) guest.
4715 *
4716 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4717 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4718 */
4719 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4720 {
4721#ifndef IN_NEM_DARWIN
4722 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4723#endif
4724 {
4725 /*
4726 * For CPUs with unrestricted guest execution enabled and with the guest
4727 * in real-mode, we must not set the deliver-error-code bit.
4728 *
4729 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4730 */
4731 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4732 }
4733#ifndef IN_NEM_DARWIN
4734 else
4735 {
4736 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4737 Assert(PDMVmmDevHeapIsEnabled(pVM));
4738 Assert(pVM->hm.s.vmx.pRealModeTSS);
4739 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4740
4741 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4742 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4743 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4744 AssertRCReturn(rc2, rc2);
4745
4746 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4747 size_t const cbIdtEntry = sizeof(X86IDTR16);
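            /* (Each real-mode IVT entry is 4 bytes: a 16-bit handler offset followed by a
               16-bit code segment selector, which is what X86IDTR16 models below.) */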
4748 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4749 {
4750 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4751 if (uVector == X86_XCPT_DF)
4752 return VINF_EM_RESET;
4753
4754 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4755 No error codes for exceptions in real-mode. */
4756 if (uVector == X86_XCPT_GP)
4757 {
4758 static HMEVENT const s_EventXcptDf
4759 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4760 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4761 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4762 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4763 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4764 }
4765
4766 /*
4767 * If we're injecting an event with no valid IDT entry, inject a #GP.
4768 * No error codes for exceptions in real-mode.
4769 *
4770 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4771 */
4772 static HMEVENT const s_EventXcptGp
4773 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4774 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4775 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4776 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4777 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4778 }
4779
4780 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4781 uint16_t uGuestIp = pCtx->ip;
4782 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4783 {
4784 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4785                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4786 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4787 }
4788 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4789 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4790
4791 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4792 X86IDTR16 IdtEntry;
4793 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4794 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4795 AssertRCReturn(rc2, rc2);
4796
4797 /* Construct the stack frame for the interrupt/exception handler. */
4798 VBOXSTRICTRC rcStrict;
4799 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4800 if (rcStrict == VINF_SUCCESS)
4801 {
4802 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4803 if (rcStrict == VINF_SUCCESS)
4804 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4805 }
4806
4807 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4808 if (rcStrict == VINF_SUCCESS)
4809 {
4810 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4811 pCtx->rip = IdtEntry.offSel;
4812 pCtx->cs.Sel = IdtEntry.uSel;
4813 pCtx->cs.ValidSel = IdtEntry.uSel;
4814 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4815 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4816 && uVector == X86_XCPT_PF)
4817 pCtx->cr2 = GCPtrFault;
4818
4819 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4820 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4821 | HM_CHANGED_GUEST_RSP);
4822
4823 /*
4824 * If we delivered a hardware exception (other than an NMI) and if there was
4825 * block-by-STI in effect, we should clear it.
4826 */
4827 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4828 {
4829 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4830 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4831 Log4Func(("Clearing inhibition due to STI\n"));
4832 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4833 }
4834
4835 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4836 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4837
4838 /*
4839 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4840 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4841 */
4842 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4843
4844 /*
4845 * If we eventually support nested-guest execution without unrestricted guest execution,
4846 * we should set fInterceptEvents here.
4847 */
4848 Assert(!fIsNestedGuest);
4849
4850 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4851 if (fStepping)
4852 rcStrict = VINF_EM_DBG_STEPPED;
4853 }
4854 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4855 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4856 return rcStrict;
4857 }
4858#else
4859 RT_NOREF(pVmcsInfo);
4860#endif
4861 }
4862
4863 /*
4864 * Validate.
4865 */
4866 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4867 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4868
4869 /*
4870 * Inject the event into the VMCS.
4871 */
4872 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4873 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4874 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4875 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4876 AssertRC(rc);
4877
4878 /*
4879 * Update guest CR2 if this is a page-fault.
4880 */
4881 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4882 pCtx->cr2 = GCPtrFault;
4883
4884 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4885 return VINF_SUCCESS;
4886}
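/*
 * Sketch of how a VM-entry interruption-information value is composed before
 * being handed to vmxHCInjectEventVmcs(); this mirrors the RT_BF_MAKE usage in
 * the function above, with #GP and an error code chosen purely as an example:
 *
 *     uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
 *                             | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
 */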
4887
4888
4889/**
4890 * Evaluates the event to be delivered to the guest and sets it as the pending
4891 * event.
4892 *
4893 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4894 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4895 * NOT restore these force-flags.
4896 *
4897 * @returns Strict VBox status code (i.e. informational status codes too).
4898 * @param pVCpu The cross context virtual CPU structure.
4899 * @param pVmcsInfo The VMCS information structure.
4900 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4901 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4902 */
4903static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4904{
4905 Assert(pfIntrState);
4906 Assert(!TRPMHasTrap(pVCpu));
4907
4908 /*
4909 * Compute/update guest-interruptibility state related FFs.
4910 * The FFs will be used below while evaluating events to be injected.
4911 */
4912 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4913
4914 /*
4915 * Evaluate if a new event needs to be injected.
4916 * An event that's already pending has already performed all necessary checks.
4917 */
4918 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4919 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4920 {
4921 /** @todo SMI. SMIs take priority over NMIs. */
4922
4923 /*
4924 * NMIs.
4925 * NMIs take priority over external interrupts.
4926 */
4927#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4928 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4929#endif
4930 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4931 {
4932 /*
4933 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4934 *
4935 * For a nested-guest, the FF always indicates the outer guest's ability to
4936 * receive an NMI while the guest-interruptibility state bit depends on whether
4937 * the nested-hypervisor is using virtual-NMIs.
4938 */
4939 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4940 {
4941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4942 if ( fIsNestedGuest
4943 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4944 return IEMExecVmxVmexitXcptNmi(pVCpu);
4945#endif
4946 vmxHCSetPendingXcptNmi(pVCpu);
4947 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4948 Log4Func(("NMI pending injection\n"));
4949
4950 /* We've injected the NMI, bail. */
4951 return VINF_SUCCESS;
4952 }
4953 if (!fIsNestedGuest)
4954 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4955 }
4956
4957 /*
4958 * External interrupts (PIC/APIC).
4959 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4960         * We cannot request the interrupt from the controller again.
4961 */
4962 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4963 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4964 {
4965 Assert(!DBGFIsStepping(pVCpu));
4966 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4967 AssertRC(rc);
4968
4969 /*
4970 * We must not check EFLAGS directly when executing a nested-guest, use
4971 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4972 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4973 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4974 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4975 *
4976 * See Intel spec. 25.4.1 "Event Blocking".
4977 */
4978 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4979 {
4980#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4981 if ( fIsNestedGuest
4982 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4983 {
4984 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4985 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4986 return rcStrict;
4987 }
4988#endif
4989 uint8_t u8Interrupt;
4990 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4991 if (RT_SUCCESS(rc))
4992 {
4993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4994 if ( fIsNestedGuest
4995 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4996 {
4997 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4998 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4999 return rcStrict;
5000 }
5001#endif
5002 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5003 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
5004 }
5005 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
5006 {
5007 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
5008
5009 if ( !fIsNestedGuest
5010 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
5011 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
5012 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
5013
5014 /*
5015 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
5016 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
5017 * need to re-set this force-flag here.
5018 */
5019 }
5020 else
5021 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
5022
5023 /* We've injected the interrupt or taken necessary action, bail. */
5024 return VINF_SUCCESS;
5025 }
5026 if (!fIsNestedGuest)
5027 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5028 }
5029 }
5030 else if (!fIsNestedGuest)
5031 {
5032 /*
5033 * An event is being injected or we are in an interrupt shadow. Check if another event is
5034 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
5035 * the pending event.
5036 */
5037 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5038 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
5039 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5040 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5041 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5042 }
5043 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
5044
5045 return VINF_SUCCESS;
5046}
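/*
 * Illustrative ordering of the evaluation and injection steps right before
 * VM-entry (hypothetical, simplified; only the two functions and fIntrState
 * are actual):
 *
 *     uint32_t fIntrState = 0;
 *     VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, fIsNestedGuest, fIntrState, fStepping);
 */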
5047
5048
5049/**
5050 * Injects any pending events into the guest if the guest is in a state to
5051 * receive them.
5052 *
5053 * @returns Strict VBox status code (i.e. informational status codes too).
5054 * @param pVCpu The cross context virtual CPU structure.
5055 * @param pVmcsInfo The VMCS information structure.
5056 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5057 * @param fIntrState The VT-x guest-interruptibility state.
5058 * @param fStepping Whether we are single-stepping the guest using the
5059 * hypervisor debugger and should return
5060 * VINF_EM_DBG_STEPPED if the event was dispatched
5061 * directly.
5062 */
5063static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5064 uint32_t fIntrState, bool fStepping)
5065{
5066 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5067#ifndef IN_NEM_DARWIN
5068 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5069#endif
5070
5071#ifdef VBOX_STRICT
5072 /*
5073 * Verify guest-interruptibility state.
5074 *
5075 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5076 * since injecting an event may modify the interruptibility state and we must thus always
5077 * use fIntrState.
5078 */
5079 {
5080 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5081 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5082 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5083 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5084 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5085 Assert(!TRPMHasTrap(pVCpu));
5086 NOREF(fBlockMovSS); NOREF(fBlockSti);
5087 }
5088#endif
5089
5090 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5091 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5092 {
5093 /*
5094 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5095 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5096 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5097 *
5098 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5099 */
5100 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5101#ifdef VBOX_STRICT
5102 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5103 {
5104 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
5105 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5106 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5107 }
5108 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5109 {
5110 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5111 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5112 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5113 }
5114#endif
5115 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5116 uIntType));
5117
5118 /*
5119 * Inject the event and get any changes to the guest-interruptibility state.
5120 *
5121 * The guest-interruptibility state may need to be updated if we inject the event
5122 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5123 */
5124 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5125 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5126
5127 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5128 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5129 else
5130 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5131 }
5132
5133 /*
5134 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5135     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5136 */
5137 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5138 && !fIsNestedGuest)
5139 {
5140 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5141
5142 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5143 {
5144 /*
5145 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5146 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5147 */
5148 Assert(!DBGFIsStepping(pVCpu));
5149 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
5150 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5151 AssertRC(rc);
5152 }
5153 else
5154 {
5155 /*
5156             * We must not deliver a debug exception when single-stepping over STI/MOV SS in the
5157             * hypervisor debugger using EFLAGS.TF; instead we clear the interrupt inhibition. However,
5158             * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
5159             * we use MTF, so just make sure it's called before executing guest code.
5160 */
5161 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5162 }
5163 }
5164    /* else: for nested-guests this is currently handled while merging VMCS controls. */
5165
5166 /*
5167 * Finally, update the guest-interruptibility state.
5168 *
5169 * This is required for the real-on-v86 software interrupt injection, for
5170 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5171 */
5172 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5173 AssertRC(rc);
5174
5175 /*
5176 * There's no need to clear the VM-entry interruption-information field here if we're not
5177 * injecting anything. VT-x clears the valid bit on every VM-exit.
5178 *
5179 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5180 */
5181
5182 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5183 return rcStrict;
5184}
5185
5186
5187/**
5188 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5189 * and update error record fields accordingly.
5190 *
5191 * @returns VMX_IGS_* error codes.
5192 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5193 * wrong with the guest state.
5194 *
5195 * @param pVCpu The cross context virtual CPU structure.
5196 * @param pVmcsInfo The VMCS info. object.
5197 *
5198 * @remarks This function assumes our cache of the VMCS controls
5199 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5200 */
5201static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5202{
5203#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5204#define HMVMX_CHECK_BREAK(expr, err) do { \
5205 if (!(expr)) { uError = (err); break; } \
5206 } while (0)
5207
5208 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5209 uint32_t uError = VMX_IGS_ERROR;
5210 uint32_t u32IntrState = 0;
5211#ifndef IN_NEM_DARWIN
5212 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5213 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5214#else
5215 bool const fUnrestrictedGuest = true;
5216#endif
5217 do
5218 {
5219 int rc;
5220
5221 /*
5222 * Guest-interruptibility state.
5223 *
5224 * Read this first so that any check that fails prior to those that actually
5225 * require the guest-interruptibility state would still reflect the correct
5226 * VMCS value and avoids causing further confusion.
5227 */
5228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5229 AssertRC(rc);
5230
5231 uint32_t u32Val;
5232 uint64_t u64Val;
5233
5234 /*
5235 * CR0.
5236 */
5237 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5238 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5239 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
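        /* (Presumably because IA32_VMX_CR0_FIXED0 has a bit set when that CR0 bit must be 1, while
           IA32_VMX_CR0_FIXED1 has a bit clear when that CR0 bit must be 0.  Hence fSetCr0 = bits
           that must be 1 and fZapCr0 = bits that may be 1, which is what the two checks below verify.) */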
5240 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5241 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5242 if (fUnrestrictedGuest)
5243 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5244
5245 uint64_t u64GuestCr0;
5246 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5247 AssertRC(rc);
5248 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5249 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5250 if ( !fUnrestrictedGuest
5251 && (u64GuestCr0 & X86_CR0_PG)
5252 && !(u64GuestCr0 & X86_CR0_PE))
5253 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5254
5255 /*
5256 * CR4.
5257 */
5258 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5259 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5260 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5261
5262 uint64_t u64GuestCr4;
5263 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5264 AssertRC(rc);
5265 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5266 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5267
5268 /*
5269 * IA32_DEBUGCTL MSR.
5270 */
5271 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5272 AssertRC(rc);
5273 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5274 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5275 {
5276 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5277 }
5278 uint64_t u64DebugCtlMsr = u64Val;
5279
5280#ifdef VBOX_STRICT
5281 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5282 AssertRC(rc);
5283 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5284#endif
5285 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5286
5287 /*
5288 * RIP and RFLAGS.
5289 */
5290 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5291 AssertRC(rc);
5292        /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5293 if ( !fLongModeGuest
5294 || !pCtx->cs.Attr.n.u1Long)
5295 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5296 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5297 * must be identical if the "IA-32e mode guest" VM-entry
5298 * control is 1 and CS.L is 1. No check applies if the
5299 * CPU supports 64 linear-address bits. */
5300
5301 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5302 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5303 AssertRC(rc);
5304 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5305 VMX_IGS_RFLAGS_RESERVED);
5306 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5307 uint32_t const u32Eflags = u64Val;
5308
5309 if ( fLongModeGuest
5310 || ( fUnrestrictedGuest
5311 && !(u64GuestCr0 & X86_CR0_PE)))
5312 {
5313 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5314 }
5315
5316 uint32_t u32EntryInfo;
5317 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5318 AssertRC(rc);
5319 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5320 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5321
5322 /*
5323 * 64-bit checks.
5324 */
5325 if (fLongModeGuest)
5326 {
5327 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5328 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5329 }
5330
5331 if ( !fLongModeGuest
5332 && (u64GuestCr4 & X86_CR4_PCIDE))
5333 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5334
5335 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5336 * 51:32 beyond the processor's physical-address width are 0. */
5337
5338 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5339 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5340 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5341
5342#ifndef IN_NEM_DARWIN
5343 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5344 AssertRC(rc);
5345 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5346
5347 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5348 AssertRC(rc);
5349 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5350#endif
5351
5352 /*
5353 * PERF_GLOBAL MSR.
5354 */
5355 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5356 {
5357 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5358 AssertRC(rc);
5359 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5360 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5361 }
5362
5363 /*
5364 * PAT MSR.
5365 */
5366 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5367 {
5368 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5369 AssertRC(rc);
5370 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5371 for (unsigned i = 0; i < 8; i++)
5372 {
5373 uint8_t u8Val = (u64Val & 0xff);
5374 if ( u8Val != 0 /* UC */
5375 && u8Val != 1 /* WC */
5376 && u8Val != 4 /* WT */
5377 && u8Val != 5 /* WP */
5378 && u8Val != 6 /* WB */
5379 && u8Val != 7 /* UC- */)
5380 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5381 u64Val >>= 8;
5382 }
5383 }
5384
5385 /*
5386 * EFER MSR.
5387 */
5388 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5389 {
5390 Assert(g_fHmVmxSupportsVmcsEfer);
5391 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5392 AssertRC(rc);
5393 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5394 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5395 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5396 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5397 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5398 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5399 * iemVmxVmentryCheckGuestState(). */
5400 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5401 || !(u64GuestCr0 & X86_CR0_PG)
5402 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5403 VMX_IGS_EFER_LMA_LME_MISMATCH);
5404 }
5405
5406 /*
5407 * Segment registers.
5408 */
5409 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5410 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5411 if (!(u32Eflags & X86_EFL_VM))
5412 {
5413 /* CS */
5414 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5415 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5416 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5417 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5418 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5419 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5420 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5421 /* CS cannot be loaded with NULL in protected mode. */
5422 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5423 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5424 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5425 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5426 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5427 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5428 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5429 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5430 else
5431 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5432
5433 /* SS */
5434 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5435 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5436 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5437 if ( !(pCtx->cr0 & X86_CR0_PE)
5438 || pCtx->cs.Attr.n.u4Type == 3)
5439 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5440
5441 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5442 {
5443 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5444 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5445 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5446 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5447 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5448 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5449 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5450 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5451 }
5452
5453 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5454 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5455 {
5456 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5457 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5458 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5459 || pCtx->ds.Attr.n.u4Type > 11
5460 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5461 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5462 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5463 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5464 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5465 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5466 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5467 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5468 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5469 }
5470 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5471 {
5472 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5473 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5474 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5475 || pCtx->es.Attr.n.u4Type > 11
5476 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5477 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5478 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5479 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5480 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5481 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5482 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5483 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5484 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5485 }
5486 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5487 {
5488 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5489 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5490 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5491 || pCtx->fs.Attr.n.u4Type > 11
5492 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5493 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5494 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5495 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5496 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5497 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5498 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5499 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5500 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5501 }
5502 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5503 {
5504 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5505 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5506 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5507 || pCtx->gs.Attr.n.u4Type > 11
5508 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5509 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5510 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5511 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5512 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5513 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5514 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5515 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5516 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5517 }
5518 /* 64-bit capable CPUs. */
5519 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5520 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5521 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5522 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5523 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5524 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5525 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5526 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5527 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5528 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5529 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5530 }
5531 else
5532 {
5533 /* V86 mode checks. */
5534 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5535 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5536 {
5537 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5538 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5539 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5540 }
5541 else
5542 {
5543 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5544 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5545 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5546 }
5547
5548 /* CS */
5549 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5550 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5551 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5552 /* SS */
5553 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5554 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5555 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5556 /* DS */
5557 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5558 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5559 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5560 /* ES */
5561 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5562 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5563 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5564 /* FS */
5565 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5566 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5567 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5568 /* GS */
5569 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5570 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5571 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5572 /* 64-bit capable CPUs. */
5573 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5574 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5575 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5576 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5577 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5578 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5579 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5580 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5581 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5582 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5583 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5584 }
5585
5586 /*
5587 * TR.
5588 */
5589 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5590 /* 64-bit capable CPUs. */
5591 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5592 if (fLongModeGuest)
5593 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5594 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5595 else
5596 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5597 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5598 VMX_IGS_TR_ATTR_TYPE_INVALID);
5599 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5600 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5601 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5602 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5603 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5604 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5605 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5606 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5607
5608 /*
5609 * GDTR and IDTR (64-bit capable checks).
5610 */
5611 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5612 AssertRC(rc);
5613 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5614
5615 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5616 AssertRC(rc);
5617 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5618
5619 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5620 AssertRC(rc);
5621 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5622
5623 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5624 AssertRC(rc);
5625 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5626
5627 /*
5628 * Guest Non-Register State.
5629 */
5630 /* Activity State. */
5631 uint32_t u32ActivityState;
5632 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5633 AssertRC(rc);
5634 HMVMX_CHECK_BREAK( !u32ActivityState
5635 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5636 VMX_IGS_ACTIVITY_STATE_INVALID);
5637 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5638 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5639
5640 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5641 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5642 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5643
5644 /** @todo Activity state and injecting interrupts. Left as a todo since we
5645         *        currently don't use any activity state other than ACTIVE. */
5646
5647 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5648 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5649
5650 /* Guest interruptibility-state. */
5651 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5652 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5653 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5654 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5655 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5656 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5657 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5658 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5659 {
5660 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5661 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5662 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5663 }
5664 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5665 {
5666 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5667 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5668 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5669 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5670 }
5671 /** @todo Assumes the processor is not in SMM. */
5672 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5673 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5674 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5675 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5676 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5677 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5678 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5679 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5680
5681 /* Pending debug exceptions. */
5682 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5683 AssertRC(rc);
5684 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5685 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5686 u32Val = u64Val; /* For pending debug exceptions checks below. */
5687
5688 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5689 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5690 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5691 {
5692 if ( (u32Eflags & X86_EFL_TF)
5693 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5694 {
5695 /* Bit 14 is PendingDebug.BS. */
5696 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5697 }
5698 if ( !(u32Eflags & X86_EFL_TF)
5699 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5700 {
5701 /* Bit 14 is PendingDebug.BS. */
5702 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5703 }
5704 }
5705
5706#ifndef IN_NEM_DARWIN
5707 /* VMCS link pointer. */
5708 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5709 AssertRC(rc);
5710 if (u64Val != UINT64_C(0xffffffffffffffff))
5711 {
5712 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5713 /** @todo Bits beyond the processor's physical-address width MBZ. */
5714 /** @todo SMM checks. */
5715 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5716 Assert(pVmcsInfo->pvShadowVmcs);
5717 VMXVMCSREVID VmcsRevId;
5718 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5719 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5720 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5721 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5722 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5723 }
5724
5725 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5726 * not using nested paging? */
5727 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5728 && !fLongModeGuest
5729 && CPUMIsGuestInPAEModeEx(pCtx))
5730 {
5731 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5732 AssertRC(rc);
5733 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5734
5735 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5736 AssertRC(rc);
5737 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5738
5739 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5740 AssertRC(rc);
5741 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5742
5743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5744 AssertRC(rc);
5745 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5746 }
5747#endif
5748
5749 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5750 if (uError == VMX_IGS_ERROR)
5751 uError = VMX_IGS_REASON_NOT_FOUND;
5752 } while (0);
5753
5754 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5755 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5756 return uError;
5757
5758#undef HMVMX_ERROR_BREAK
5759#undef HMVMX_CHECK_BREAK
5760}
5761
5762
5763#ifndef HMVMX_USE_FUNCTION_TABLE
5764/**
5765 * Handles a guest VM-exit from hardware-assisted VMX execution.
5766 *
5767 * @returns Strict VBox status code (i.e. informational status codes too).
5768 * @param pVCpu The cross context virtual CPU structure.
5769 * @param pVmxTransient The VMX-transient structure.
5770 */
5771DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5772{
5773#ifdef DEBUG_ramshankar
5774# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5775 do { \
5776 if (a_fSave != 0) \
5777 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5778 VBOXSTRICTRC rcStrict = a_CallExpr; \
5779 if (a_fSave != 0) \
5780 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5781 return rcStrict; \
5782 } while (0)
5783#else
5784# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5785#endif
5786 uint32_t const uExitReason = pVmxTransient->uExitReason;
5787 switch (uExitReason)
5788 {
5789 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5790 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5791 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5792 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5793 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5794 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5795 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5796 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5797 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5798 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5799 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5800 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5801 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5802 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5803 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5804 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5805 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5806 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5807 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5808 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5809 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5810 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5811 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5812 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5813 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5814 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5815 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5816 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5817 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5818 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5819#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5820 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5821 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5822 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5823 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5824 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5825         case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5826         case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5827 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5828 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5829 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5830#else
5831 case VMX_EXIT_VMCLEAR:
5832 case VMX_EXIT_VMLAUNCH:
5833 case VMX_EXIT_VMPTRLD:
5834 case VMX_EXIT_VMPTRST:
5835 case VMX_EXIT_VMREAD:
5836 case VMX_EXIT_VMRESUME:
5837 case VMX_EXIT_VMWRITE:
5838 case VMX_EXIT_VMXOFF:
5839 case VMX_EXIT_VMXON:
5840 case VMX_EXIT_INVVPID:
5841 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5842#endif
5843#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5844 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5845#else
5846 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5847#endif
5848
5849 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5850 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5851 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5852
5853 case VMX_EXIT_INIT_SIGNAL:
5854 case VMX_EXIT_SIPI:
5855 case VMX_EXIT_IO_SMI:
5856 case VMX_EXIT_SMI:
5857 case VMX_EXIT_ERR_MSR_LOAD:
5858 case VMX_EXIT_ERR_MACHINE_CHECK:
5859 case VMX_EXIT_PML_FULL:
5860 case VMX_EXIT_VIRTUALIZED_EOI:
5861 case VMX_EXIT_GDTR_IDTR_ACCESS:
5862 case VMX_EXIT_LDTR_TR_ACCESS:
5863 case VMX_EXIT_APIC_WRITE:
5864 case VMX_EXIT_RDRAND:
5865 case VMX_EXIT_RSM:
5866 case VMX_EXIT_VMFUNC:
5867 case VMX_EXIT_ENCLS:
5868 case VMX_EXIT_RDSEED:
5869 case VMX_EXIT_XSAVES:
5870 case VMX_EXIT_XRSTORS:
5871 case VMX_EXIT_UMWAIT:
5872 case VMX_EXIT_TPAUSE:
5873 case VMX_EXIT_LOADIWKEY:
5874 default:
5875 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5876 }
5877#undef VMEXIT_CALL_RET
5878}
5879#endif /* !HMVMX_USE_FUNCTION_TABLE */
5880
5881
5882#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5883/**
5884 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5885 *
5886 * @returns Strict VBox status code (i.e. informational status codes too).
5887 * @param pVCpu The cross context virtual CPU structure.
5888 * @param pVmxTransient The VMX-transient structure.
5889 */
5890DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5891{
5892 uint32_t const uExitReason = pVmxTransient->uExitReason;
5893 switch (uExitReason)
5894 {
5895# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5896 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5897 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5898# else
5899 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5900 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5901# endif
5902 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5903 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5904 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5905
5906 /*
5907 * We shouldn't direct host physical interrupts to the nested-guest.
5908 */
5909 case VMX_EXIT_EXT_INT:
5910 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5911
5912 /*
5913          * Instructions that cause VM-exits unconditionally, or whose intercept
5914          * condition is controlled solely by the nested hypervisor (meaning if the
5915          * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
5916 *
5917 * - Provides VM-exit instruction length ONLY.
5918 */
5919 case VMX_EXIT_CPUID: /* Unconditional. */
5920 case VMX_EXIT_VMCALL:
5921 case VMX_EXIT_GETSEC:
5922 case VMX_EXIT_INVD:
5923 case VMX_EXIT_XSETBV:
5924 case VMX_EXIT_VMLAUNCH:
5925 case VMX_EXIT_VMRESUME:
5926 case VMX_EXIT_VMXOFF:
5927 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5928 case VMX_EXIT_VMFUNC:
5929 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5930
5931 /*
5932          * Instructions that cause VM-exits unconditionally, or whose intercept
5933          * condition is controlled solely by the nested hypervisor (meaning if the
5934          * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
5935 *
5936 * - Provides VM-exit instruction length.
5937 * - Provides VM-exit information.
5938 * - Optionally provides Exit qualification.
5939 *
5940 * Since Exit qualification is 0 for all VM-exits where it is not
5941 * applicable, reading and passing it to the guest should produce
5942 * defined behavior.
5943 *
5944 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5945 */
5946 case VMX_EXIT_INVEPT: /* Unconditional. */
5947 case VMX_EXIT_INVVPID:
5948 case VMX_EXIT_VMCLEAR:
5949 case VMX_EXIT_VMPTRLD:
5950 case VMX_EXIT_VMPTRST:
5951 case VMX_EXIT_VMXON:
5952 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5953 case VMX_EXIT_LDTR_TR_ACCESS:
5954 case VMX_EXIT_RDRAND:
5955 case VMX_EXIT_RDSEED:
5956 case VMX_EXIT_XSAVES:
5957 case VMX_EXIT_XRSTORS:
5958 case VMX_EXIT_UMWAIT:
5959 case VMX_EXIT_TPAUSE:
5960 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5961
5962 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5963 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5964 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5965 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5966 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5967 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5968 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5969 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5970 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5971 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5972 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5973 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5974 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5975 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5976 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5977 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5978 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5979 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5980 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5981
5982 case VMX_EXIT_PREEMPT_TIMER:
5983 {
5984 /** @todo NSTVMX: Preempt timer. */
5985 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5986 }
5987
5988 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5989 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5990
5991 case VMX_EXIT_VMREAD:
5992 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5993
5994 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5995 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5996
5997 case VMX_EXIT_INIT_SIGNAL:
5998 case VMX_EXIT_SIPI:
5999 case VMX_EXIT_IO_SMI:
6000 case VMX_EXIT_SMI:
6001 case VMX_EXIT_ERR_MSR_LOAD:
6002 case VMX_EXIT_ERR_MACHINE_CHECK:
6003 case VMX_EXIT_PML_FULL:
6004 case VMX_EXIT_RSM:
6005 default:
6006 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6007 }
6008}
6009#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6010
6011
6012/** @name VM-exit helpers.
6013 * @{
6014 */
6015/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6016/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6017/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6018
6019/** Macro for VM-exits called unexpectedly. */
6020#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6021 do { \
6022 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6023 return VERR_VMX_UNEXPECTED_EXIT; \
6024 } while (0)
6025
6026#ifdef VBOX_STRICT
6027# ifndef IN_NEM_DARWIN
6028 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6029# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6030 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6031
6032# define HMVMX_ASSERT_PREEMPT_CPUID() \
6033 do { \
6034 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6035 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6036 } while (0)
6037
6038# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6039 do { \
6040 AssertPtr((a_pVCpu)); \
6041 AssertPtr((a_pVmxTransient)); \
6042 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6043 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6044 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6045 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6046 Assert((a_pVmxTransient)->pVmcsInfo); \
6047 Assert(ASMIntAreEnabled()); \
6048 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6049 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6050 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6051 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6052 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6053 HMVMX_ASSERT_PREEMPT_CPUID(); \
6054 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6055 } while (0)
6056# else
6057# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6058# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6059# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6060 do { \
6061 AssertPtr((a_pVCpu)); \
6062 AssertPtr((a_pVmxTransient)); \
6063 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6064 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6065 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6066 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6067 Assert((a_pVmxTransient)->pVmcsInfo); \
6068 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6069 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6070 } while (0)
6071# endif
6072
6073# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6074 do { \
6075 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6076 Assert((a_pVmxTransient)->fIsNestedGuest); \
6077 } while (0)
6078
6079# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6080 do { \
6081 Log4Func(("\n")); \
6082 } while (0)
6083#else
6084# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6085 do { \
6086 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6087 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6088 } while (0)
6089
6090# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6091 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6092
6093# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6094#endif
6095
6096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6097 /** Macro that performs the necessary privilege checks for VM-exits that occurred
6098  *  due to the guest attempting to execute a VMX instruction. */
6099# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6100 do \
6101 { \
6102 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6103 if (rcStrictTmp == VINF_SUCCESS) \
6104 { /* likely */ } \
6105 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6106 { \
6107 Assert((a_pVCpu)->hm.s.Event.fPending); \
6108 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6109 return VINF_SUCCESS; \
6110 } \
6111 else \
6112 { \
6113 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6114 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6115 } \
6116 } while (0)
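/* Illustrative usage (a sketch, not lifted from a specific handler): a VMX-instruction
   exit handler would typically invoke
       HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
   before decoding any operands; if the checks queue an exception, the macro returns
   VINF_SUCCESS from the handler so the pending event gets injected instead. */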
6117
6118 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6119# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6120 do \
6121 { \
6122 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6123 (a_pGCPtrEffAddr)); \
6124 if (rcStrictTmp == VINF_SUCCESS) \
6125 { /* likely */ } \
6126 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6127 { \
6128 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6129 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6130 NOREF(uXcptTmp); \
6131 return VINF_SUCCESS; \
6132 } \
6133 else \
6134 { \
6135 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6136 return rcStrictTmp; \
6137 } \
6138 } while (0)
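/* Illustrative (hypothetical) usage: with the exit instruction information and Exit
   qualification already read into the transient structure, a handler could do
       RTGCPTR GCPtrEffAddr;
       HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                                VMXMEMACCESS_READ, &GCPtrEffAddr);
   after which GCPtrEffAddr holds the decoded effective address, any decode fault having
   been turned into a pending exception and an early VINF_SUCCESS return. */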
6139#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6140
6141
6142/**
6143 * Advances the guest RIP by the specified number of bytes.
6144 *
6145 * @param pVCpu The cross context virtual CPU structure.
6146 * @param cbInstr Number of bytes to advance the RIP by.
6147 *
6148 * @remarks No-long-jump zone!!!
6149 */
6150DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6151{
6152 /* Advance the RIP. */
6153 pVCpu->cpum.GstCtx.rip += cbInstr;
6154 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
6155 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
6156 /** @todo clear RF? */
6157}
6158
6159
6160/**
6161 * Advances the guest RIP after reading it from the VMCS.
6162 *
6163 * @returns VBox status code, no informational status codes.
6164 * @param pVCpu The cross context virtual CPU structure.
6165 * @param pVmxTransient The VMX-transient structure.
6166 *
6167 * @remarks No-long-jump zone!!!
6168 */
6169static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6170{
6171 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6172 /** @todo consider template here after checking callers. */
6173 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6174 AssertRCReturn(rc, rc);
6175
6176 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6177 return VINF_SUCCESS;
6178}
6179
6180
6181/**
6182 * Handle a condition that occurred while delivering an event through the guest or
6183 * nested-guest IDT.
6184 *
6185 * @returns Strict VBox status code (i.e. informational status codes too).
6186 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6187 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6188  *         to continue execution of the guest which will deliver the \#DF.
6189 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6190 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6191 *
6192 * @param pVCpu The cross context virtual CPU structure.
6193 * @param pVmxTransient The VMX-transient structure.
6194 *
6195 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6196 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6197 * is due to an EPT violation, PML full or SPP-related event.
6198 *
6199 * @remarks No-long-jump zone!!!
6200 */
6201static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6202{
6203 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6204 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6205 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6206 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6207 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6208 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6209
6210 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6211 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6212 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6213 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6214 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6215 {
6216 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6217 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6218
6219 /*
6220 * If the event was a software interrupt (generated with INT n) or a software exception
6221 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6222 * can handle the VM-exit and continue guest execution which will re-execute the
6223 * instruction rather than re-injecting the exception, as that can cause premature
6224 * trips to ring-3 before injection and involve TRPM which currently has no way of
6225 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6226 * the problem).
6227 */
6228 IEMXCPTRAISE enmRaise;
6229 IEMXCPTRAISEINFO fRaiseInfo;
6230 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6231 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6232 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6233 {
6234 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6235 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6236 }
6237 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6238 {
6239 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6240 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6241 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6242
6243 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6244 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6245
6246 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6247
6248 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6249 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6250 {
6251 pVmxTransient->fVectoringPF = true;
6252 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6253 }
6254 }
6255 else
6256 {
6257 /*
6258 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6259 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6260 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6261 */
6262 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6263 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6264 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6265 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6266 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6267 }
6268
6269 /*
6270 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6271 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6272 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6273 * subsequent VM-entry would fail, see @bugref{7445}.
6274 *
6275 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6276 */
6277 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6278 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6279 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6280 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6281 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6282
6283 switch (enmRaise)
6284 {
6285 case IEMXCPTRAISE_CURRENT_XCPT:
6286 {
6287 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6288 Assert(rcStrict == VINF_SUCCESS);
6289 break;
6290 }
6291
6292 case IEMXCPTRAISE_PREV_EVENT:
6293 {
6294 uint32_t u32ErrCode;
6295 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6296 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6297 else
6298 u32ErrCode = 0;
6299
6300 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6301 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6302 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6303 pVCpu->cpum.GstCtx.cr2);
6304
6305 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6306 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6307 Assert(rcStrict == VINF_SUCCESS);
6308 break;
6309 }
6310
6311 case IEMXCPTRAISE_REEXEC_INSTR:
6312 Assert(rcStrict == VINF_SUCCESS);
6313 break;
6314
6315 case IEMXCPTRAISE_DOUBLE_FAULT:
6316 {
6317 /*
6318 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6319 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6320 */
6321 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6322 {
6323 pVmxTransient->fVectoringDoublePF = true;
6324 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6325 pVCpu->cpum.GstCtx.cr2));
6326 rcStrict = VINF_SUCCESS;
6327 }
6328 else
6329 {
6330 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6331 vmxHCSetPendingXcptDF(pVCpu);
6332 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6333 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6334 rcStrict = VINF_HM_DOUBLE_FAULT;
6335 }
6336 break;
6337 }
6338
6339 case IEMXCPTRAISE_TRIPLE_FAULT:
6340 {
6341 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6342 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6343 rcStrict = VINF_EM_RESET;
6344 break;
6345 }
6346
6347 case IEMXCPTRAISE_CPU_HANG:
6348 {
6349 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6350 rcStrict = VERR_EM_GUEST_CPU_HANG;
6351 break;
6352 }
6353
6354 default:
6355 {
6356 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6357 rcStrict = VERR_VMX_IPE_2;
6358 break;
6359 }
6360 }
6361 }
6362 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6363 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6364 {
6365 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6366 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6367 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6368 {
6369 /*
6370              * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6371 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6372 * that virtual NMIs remain blocked until the IRET execution is completed.
6373 *
6374 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6375 */
6376 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6377 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6378 }
6379 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6380 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6381 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6382 {
6383 /*
6384 * Execution of IRET caused an EPT violation, page-modification log-full event or
6385 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6386 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6387 * that virtual NMIs remain blocked until the IRET execution is completed.
6388 *
6389 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6390 */
6391 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6392 {
6393 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6394 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6395 }
6396 }
6397 }
6398
6399 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6400 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6401 return rcStrict;
6402}
6403
6404
6405#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6406/**
6407 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6408 * guest attempting to execute a VMX instruction.
6409 *
6410 * @returns Strict VBox status code (i.e. informational status codes too).
6411 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6412 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6413 *
6414 * @param pVCpu The cross context virtual CPU structure.
6415 * @param uExitReason The VM-exit reason.
6416 *
6417 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6418 * @remarks No-long-jump zone!!!
6419 */
6420static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6421{
6422 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6423 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6424
6425 /*
6426 * The physical CPU would have already checked the CPU mode/code segment.
6427 * We shall just assert here for paranoia.
6428 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6429 */
6430 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6431 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6432 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6433
6434 if (uExitReason == VMX_EXIT_VMXON)
6435 {
6436 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6437
6438 /*
6439 * We check CR4.VMXE because it is required to be always set while in VMX operation
6440 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6441 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6442 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6443 */
6444 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6445 {
6446 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6447 vmxHCSetPendingXcptUD(pVCpu);
6448 return VINF_HM_PENDING_XCPT;
6449 }
6450 }
6451 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6452 {
6453 /*
6454 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6455          * (other than VMXON), so we need to raise a #UD.
6456 */
6457 Log4Func(("Not in VMX root mode -> #UD\n"));
6458 vmxHCSetPendingXcptUD(pVCpu);
6459 return VINF_HM_PENDING_XCPT;
6460 }
6461
6462 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6463 return VINF_SUCCESS;
6464}
6465
6466
6467/**
6468 * Decodes the memory operand of an instruction that caused a VM-exit.
6469 *
6470 * The Exit qualification field provides the displacement field for memory
6471 * operand instructions, if any.
6472 *
6473 * @returns Strict VBox status code (i.e. informational status codes too).
6474 * @retval VINF_SUCCESS if the operand was successfully decoded.
6475 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6476 * operand.
6477 * @param pVCpu The cross context virtual CPU structure.
6478 * @param uExitInstrInfo The VM-exit instruction information field.
6479 * @param enmMemAccess The memory operand's access type (read or write).
6480 * @param GCPtrDisp The instruction displacement field, if any. For
6481 * RIP-relative addressing pass RIP + displacement here.
6482 * @param pGCPtrMem Where to store the effective destination memory address.
6483 *
6484 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6485  *          virtual-8086 mode, hence skips those checks while verifying if the
6486 * segment is valid.
6487 */
6488static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6489 PRTGCPTR pGCPtrMem)
6490{
6491 Assert(pGCPtrMem);
6492 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6493 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6494 | CPUMCTX_EXTRN_CR0);
6495
6496 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6497 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6498 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
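    /* The tables are indexed by the instruction-information address-size field, which per
       the Intel SDM encodes 0 = 16-bit, 1 = 32-bit and 2 = 64-bit addressing; other values
       are not used and are rejected by the validation below. */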
6499
6500 VMXEXITINSTRINFO ExitInstrInfo;
6501 ExitInstrInfo.u = uExitInstrInfo;
6502 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6503 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6504 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6505 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6506 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6507 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6508 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6509 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6510 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6511
6512 /*
6513 * Validate instruction information.
6514 * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
6515 */
6516 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6517 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6518 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6519 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6520 AssertLogRelMsgReturn(fIsMemOperand,
6521 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6522
6523 /*
6524 * Compute the complete effective address.
6525 *
6526 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6527 * See AMD spec. 4.5.2 "Segment Registers".
6528 */
6529 RTGCPTR GCPtrMem = GCPtrDisp;
6530 if (fBaseRegValid)
6531 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6532 if (fIdxRegValid)
6533 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6534
6535 RTGCPTR const GCPtrOff = GCPtrMem;
6536 if ( !fIsLongMode
6537 || iSegReg >= X86_SREG_FS)
6538 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6539 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
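    /* Hypothetical example: a 64-bit "vmptrld [rbx + rsi*4 + 0x10]" yields GCPtrDisp = 0x10,
       iBaseReg = RBX, iIdxReg = RSI, uScale = 2 and DS as the segment (whose base is not added
       in long mode), so GCPtrMem = rbx + (rsi << 2) + 0x10, truncated by the 64-bit
       address-size mask. */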
6540
6541 /*
6542 * Validate effective address.
6543 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6544 */
6545 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6546 Assert(cbAccess > 0);
6547 if (fIsLongMode)
6548 {
6549 if (X86_IS_CANONICAL(GCPtrMem))
6550 {
6551 *pGCPtrMem = GCPtrMem;
6552 return VINF_SUCCESS;
6553 }
6554
6555 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6556 * "Data Limit Checks in 64-bit Mode". */
6557 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6558 vmxHCSetPendingXcptGP(pVCpu, 0);
6559 return VINF_HM_PENDING_XCPT;
6560 }
6561
6562 /*
6563 * This is a watered down version of iemMemApplySegment().
6564 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6565 * and segment CPL/DPL checks are skipped.
6566 */
6567 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6568 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6569 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6570
6571 /* Check if the segment is present and usable. */
6572 if ( pSel->Attr.n.u1Present
6573 && !pSel->Attr.n.u1Unusable)
6574 {
6575 Assert(pSel->Attr.n.u1DescType);
6576 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6577 {
6578 /* Check permissions for the data segment. */
6579 if ( enmMemAccess == VMXMEMACCESS_WRITE
6580 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6581 {
6582 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6583 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6584 return VINF_HM_PENDING_XCPT;
6585 }
6586
6587 /* Check limits if it's a normal data segment. */
6588 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6589 {
6590 if ( GCPtrFirst32 > pSel->u32Limit
6591 || GCPtrLast32 > pSel->u32Limit)
6592 {
6593 Log4Func(("Data segment limit exceeded. "
6594 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6595 GCPtrLast32, pSel->u32Limit));
6596 if (iSegReg == X86_SREG_SS)
6597 vmxHCSetPendingXcptSS(pVCpu, 0);
6598 else
6599 vmxHCSetPendingXcptGP(pVCpu, 0);
6600 return VINF_HM_PENDING_XCPT;
6601 }
6602 }
6603 else
6604 {
6605 /* Check limits if it's an expand-down data segment.
6606 Note! The upper boundary is defined by the B bit, not the G bit! */
6607 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6608 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6609 {
6610 Log4Func(("Expand-down data segment limit exceeded. "
6611 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6612 GCPtrLast32, pSel->u32Limit));
6613 if (iSegReg == X86_SREG_SS)
6614 vmxHCSetPendingXcptSS(pVCpu, 0);
6615 else
6616 vmxHCSetPendingXcptGP(pVCpu, 0);
6617 return VINF_HM_PENDING_XCPT;
6618 }
6619 }
6620 }
6621 else
6622 {
6623 /* Check permissions for the code segment. */
6624 if ( enmMemAccess == VMXMEMACCESS_WRITE
6625 || ( enmMemAccess == VMXMEMACCESS_READ
6626 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6627 {
6628 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6629 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6630 vmxHCSetPendingXcptGP(pVCpu, 0);
6631 return VINF_HM_PENDING_XCPT;
6632 }
6633
6634 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6635 if ( GCPtrFirst32 > pSel->u32Limit
6636 || GCPtrLast32 > pSel->u32Limit)
6637 {
6638 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6639 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6640 if (iSegReg == X86_SREG_SS)
6641 vmxHCSetPendingXcptSS(pVCpu, 0);
6642 else
6643 vmxHCSetPendingXcptGP(pVCpu, 0);
6644 return VINF_HM_PENDING_XCPT;
6645 }
6646 }
6647 }
6648 else
6649 {
6650 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6651 vmxHCSetPendingXcptGP(pVCpu, 0);
6652 return VINF_HM_PENDING_XCPT;
6653 }
6654
6655 *pGCPtrMem = GCPtrMem;
6656 return VINF_SUCCESS;
6657}
6658#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6659
6660
6661/**
6662 * VM-exit helper for LMSW.
6663 */
6664static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6665{
6666 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6667 AssertRCReturn(rc, rc);
6668
6669 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6670 AssertMsg( rcStrict == VINF_SUCCESS
6671 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6672
6673 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6674 if (rcStrict == VINF_IEM_RAISED_XCPT)
6675 {
6676 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6677 rcStrict = VINF_SUCCESS;
6678 }
6679
6680 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6681 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6682 return rcStrict;
6683}
6684
6685
6686/**
6687 * VM-exit helper for CLTS.
6688 */
6689static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6690{
6691 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6692 AssertRCReturn(rc, rc);
6693
6694 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6695 AssertMsg( rcStrict == VINF_SUCCESS
6696 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6697
6698 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6699 if (rcStrict == VINF_IEM_RAISED_XCPT)
6700 {
6701 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6702 rcStrict = VINF_SUCCESS;
6703 }
6704
6705 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6706 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6707 return rcStrict;
6708}
6709
6710
6711/**
6712 * VM-exit helper for MOV from CRx (CRx read).
6713 */
6714static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6715{
6716 Assert(iCrReg < 16);
6717 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6718
6719 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6720 AssertRCReturn(rc, rc);
6721
6722 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6723 AssertMsg( rcStrict == VINF_SUCCESS
6724 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6725
6726 if (iGReg == X86_GREG_xSP)
6727 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6728 else
6729 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6730#ifdef VBOX_WITH_STATISTICS
6731 switch (iCrReg)
6732 {
6733 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6734 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6735 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6736 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6737 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6738 }
6739#endif
6740 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6741 return rcStrict;
6742}
6743
6744
6745/**
6746 * VM-exit helper for MOV to CRx (CRx write).
6747 */
6748static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6749{
6750 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6751
6752 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6753 AssertMsg( rcStrict == VINF_SUCCESS
6754 || rcStrict == VINF_IEM_RAISED_XCPT
6755 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6756
6757 switch (iCrReg)
6758 {
6759 case 0:
6760 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6761 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6762 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6763 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6764 break;
6765
6766 case 2:
6767 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6768             /* Nothing to do here, CR2 is not part of the VMCS. */
6769 break;
6770
6771 case 3:
6772 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6773 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6774 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6775 break;
6776
6777 case 4:
6778 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6779 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6780#ifndef IN_NEM_DARWIN
6781 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6782 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6783#else
6784 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6785#endif
6786 break;
6787
6788 case 8:
6789 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6790 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6791 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6792 break;
6793
6794 default:
6795 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6796 break;
6797 }
6798
6799 if (rcStrict == VINF_IEM_RAISED_XCPT)
6800 {
6801 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6802 rcStrict = VINF_SUCCESS;
6803 }
6804 return rcStrict;
6805}
6806
6807
6808/**
6809 * VM-exit exception handler for \#PF (Page-fault exception).
6810 *
6811 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6812 */
6813static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6814{
6815 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6816 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6817
6818#ifndef IN_NEM_DARWIN
6819 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6820 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6821 { /* likely */ }
6822 else
6823#endif
6824 {
6825#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6826 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6827#endif
6828 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6829 if (!pVmxTransient->fVectoringDoublePF)
6830 {
6831 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6832 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6833 }
6834 else
6835 {
6836 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6837 Assert(!pVmxTransient->fIsNestedGuest);
6838 vmxHCSetPendingXcptDF(pVCpu);
6839 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6840 }
6841 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6842 return VINF_SUCCESS;
6843 }
6844
6845 Assert(!pVmxTransient->fIsNestedGuest);
6846
6847     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6848        of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6849 if (pVmxTransient->fVectoringPF)
6850 {
6851 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6852 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6853 }
6854
6855 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6856 AssertRCReturn(rc, rc);
6857
6858 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6859 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6860
6861 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6862 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6863
6864 Log4Func(("#PF: rc=%Rrc\n", rc));
6865 if (rc == VINF_SUCCESS)
6866 {
6867 /*
6868          * This is typically a shadow page table sync or an MMIO instruction. But we may have
6869 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6870 */
6871 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6872 TRPMResetTrap(pVCpu);
6873 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6874 return rc;
6875 }
6876
6877 if (rc == VINF_EM_RAW_GUEST_TRAP)
6878 {
6879 if (!pVmxTransient->fVectoringDoublePF)
6880 {
6881 /* It's a guest page fault and needs to be reflected to the guest. */
6882 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6883 TRPMResetTrap(pVCpu);
6884 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6885 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6886 uGstErrorCode, pVmxTransient->uExitQual);
6887 }
6888 else
6889 {
6890 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6891 TRPMResetTrap(pVCpu);
6892 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6893 vmxHCSetPendingXcptDF(pVCpu);
6894 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6895 }
6896
6897 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6898 return VINF_SUCCESS;
6899 }
6900
6901 TRPMResetTrap(pVCpu);
6902 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6903 return rc;
6904}
6905
6906
6907/**
6908 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6909 *
6910 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6911 */
6912static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6913{
6914 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6915 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6916
6917 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6918 AssertRCReturn(rc, rc);
6919
6920 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6921 {
6922 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6923 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6924
6925 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6926          *        provides VM-exit instruction length. If this causes a problem later,
6927 * disassemble the instruction like it's done on AMD-V. */
6928 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6929 AssertRCReturn(rc2, rc2);
6930 return rc;
6931 }
6932
6933 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6934 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6935 return VINF_SUCCESS;
6936}
6937
6938
6939/**
6940 * VM-exit exception handler for \#BP (Breakpoint exception).
6941 *
6942 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6943 */
6944static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6945{
6946 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6947 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6948
6949 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6950 AssertRCReturn(rc, rc);
6951
6952 VBOXSTRICTRC rcStrict;
6953 if (!pVmxTransient->fIsNestedGuest)
6954 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
6955 else
6956 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6957
6958 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6959 {
6960 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6961 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6962 rcStrict = VINF_SUCCESS;
6963 }
6964
6965 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6966 return rcStrict;
6967}
6968
6969
6970/**
6971 * VM-exit exception handler for \#AC (Alignment-check exception).
6972 *
6973 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6974 */
6975static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6976{
6977 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6978
6979 /*
6980      * Detect #ACs caused by the host having enabled split-lock detection.
6981 * Emulate such instructions.
6982 */
6983#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6984 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6985 AssertRCReturn(rc, rc);
6986 /** @todo detect split lock in cpu feature? */
6987 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6988 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6989 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6990 || CPUMGetGuestCPL(pVCpu) != 3
6991            /* 3. When EFLAGS.AC is not set (== 0), this can only be a split-lock case. */
6992 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6993 {
6994 /*
6995 * Check for debug/trace events and import state accordingly.
6996 */
6997 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6998 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6999 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7000#ifndef IN_NEM_DARWIN
7001 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7002#endif
7003 )
7004 {
7005 if (pVM->cCpus == 1)
7006 {
7007#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7008 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7009 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7010#else
7011 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7012 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7013#endif
7014 AssertRCReturn(rc, rc);
7015 }
7016 }
7017 else
7018 {
7019 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7020 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7021 AssertRCReturn(rc, rc);
7022
7023 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7024
7025 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7026 {
7027 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7028 if (rcStrict != VINF_SUCCESS)
7029 return rcStrict;
7030 }
7031 }
7032
7033 /*
7034 * Emulate the instruction.
7035 *
7036 * We have to ignore the LOCK prefix here as we must not retrigger the
7037 * detection on the host. This isn't all that satisfactory, though...
7038 */
7039 if (pVM->cCpus == 1)
7040 {
7041 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7042 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7043
7044 /** @todo For SMP configs we should do a rendezvous here. */
7045 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7046 if (rcStrict == VINF_SUCCESS)
7047#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7048 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7049 HM_CHANGED_GUEST_RIP
7050 | HM_CHANGED_GUEST_RFLAGS
7051 | HM_CHANGED_GUEST_GPRS_MASK
7052 | HM_CHANGED_GUEST_CS
7053 | HM_CHANGED_GUEST_SS);
7054#else
7055 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7056#endif
7057 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7058 {
7059 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7060 rcStrict = VINF_SUCCESS;
7061 }
7062 return rcStrict;
7063 }
7064 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7065 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7066 return VINF_EM_EMULATE_SPLIT_LOCK;
7067 }
7068
7069 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7070 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7071 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7072
7073 /* Re-inject it. We'll detect any nesting before getting here. */
7074 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7075 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7076 return VINF_SUCCESS;
7077}
7078
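#if 0 /* Illustrative, compiled-out sketch - not part of the original file. */
/* A condensed restatement of the classification above: a #AC can only be a genuine guest
   alignment-check exception when CR0.AM, CPL==3 and EFLAGS.AC all hold; otherwise it must be
   the host's split-lock detection firing.  Only the X86_CR0_AM / X86_EFL_AC definitions already
   used above are assumed; the helper name is made up. */
DECLINLINE(bool) vmxSketchIsHostSplitLockAc(uint64_t uCr0, uint64_t fEflags, uint8_t uCpl)
{
    bool const fGuestAlignmentCheckAc = (uCr0 & X86_CR0_AM)
                                     && uCpl == 3
                                     && (fEflags & X86_EFL_AC);
    return !fGuestAlignmentCheckAc;
}
#endif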
7079
7080/**
7081 * VM-exit exception handler for \#DB (Debug exception).
7082 *
7083 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7084 */
7085static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7086{
7087 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7088 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7089
7090 /*
7091 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7092 */
7093 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7094
7095    /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7096 uint64_t const uDR6 = X86_DR6_INIT_VAL
7097 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7098 | X86_DR6_BD | X86_DR6_BS));
7099
7100 int rc;
7101 if (!pVmxTransient->fIsNestedGuest)
7102 {
7103 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7104
7105 /*
7106 * Prevents stepping twice over the same instruction when the guest is stepping using
7107 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7108 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7109 */
7110 if ( rc == VINF_EM_DBG_STEPPED
7111 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7112 {
7113 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7114 rc = VINF_EM_RAW_GUEST_TRAP;
7115 }
7116 }
7117 else
7118 rc = VINF_EM_RAW_GUEST_TRAP;
7119 Log6Func(("rc=%Rrc\n", rc));
7120 if (rc == VINF_EM_RAW_GUEST_TRAP)
7121 {
7122 /*
7123 * The exception was for the guest. Update DR6, DR7.GD and
7124 * IA32_DEBUGCTL.LBR before forwarding it.
7125 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
7126 */
7127#ifndef IN_NEM_DARWIN
7128 VMMRZCallRing3Disable(pVCpu);
7129 HM_DISABLE_PREEMPT(pVCpu);
7130
7131 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7132 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7133 if (CPUMIsGuestDebugStateActive(pVCpu))
7134 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7135
7136 HM_RESTORE_PREEMPT();
7137 VMMRZCallRing3Enable(pVCpu);
7138#else
7139 /** @todo */
7140#endif
7141
7142 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7143 AssertRCReturn(rc, rc);
7144
7145 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7146 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7147
7148 /* Paranoia. */
7149 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7150 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7151
7152 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7153 AssertRC(rc);
7154
7155 /*
7156 * Raise #DB in the guest.
7157 *
7158 * It is important to reflect exactly what the VM-exit gave us (preserving the
7159 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7160 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7161 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7162 *
7163      * Intel re-documented ICEBP/INT1 in May 2018 (it was previously documented as part of
7164      * the Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7165 */
7166 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7167 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7168 return VINF_SUCCESS;
7169 }
7170
7171 /*
7172     * Not a guest trap, so this must be a hypervisor-related debug event.
7173 * Update DR6 in case someone is interested in it.
7174 */
7175 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7176 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7177 CPUMSetHyperDR6(pVCpu, uDR6);
7178
7179 return rc;
7180}
7181
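#if 0 /* Illustrative, compiled-out sketch - not part of the original file. */
/* How the handler above derives the DR6-like value from the Exit qualification and sanitizes
   DR7 before reflecting the #DB; only the X86_DR6_* / X86_DR7_* definitions already used there
   are assumed, and the helper names are made up. */
DECLINLINE(uint64_t) vmxSketchDr6FromExitQual(uint64_t uExitQual)
{
    /* B0-B3, BD and BS come from the Exit qualification, the rest is the DR6 init value. */
    return X86_DR6_INIT_VAL
         | (uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
}

DECLINLINE(uint64_t) vmxSketchSanitizeDr7(uint64_t uDr7)
{
    uDr7 &= ~(uint64_t)X86_DR7_GD;          /* Clear general-detect, as the handler above does. */
    uDr7 &= ~(uint64_t)X86_DR7_RAZ_MASK;    /* Clear the must-be-zero bits. */
    uDr7 |= X86_DR7_RA1_MASK;               /* Set the must-be-one bits. */
    return uDr7;
}
#endif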
7182
7183/**
7184 * Hacks its way around the lovely mesa driver's backdoor accesses.
7185 *
7186 * @sa hmR0SvmHandleMesaDrvGp.
7187 */
7188static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7189{
7190 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7191 RT_NOREF(pCtx);
7192
7193 /* For now we'll just skip the instruction. */
7194 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7195}
7196
7197
7198/**
7199 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7200 * backdoor logging w/o checking what it is running inside.
7201 *
7202 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7203 * backdoor port and magic numbers loaded in registers.
7204 *
7205 * @returns true if it is, false if it isn't.
7206 * @sa hmR0SvmIsMesaDrvGp.
7207 */
7208DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7209{
7210 /* 0xed: IN eAX,dx */
7211 uint8_t abInstr[1];
7212 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7213 return false;
7214
7215 /* Check that it is #GP(0). */
7216 if (pVmxTransient->uExitIntErrorCode != 0)
7217 return false;
7218
7219 /* Check magic and port. */
7220 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7221 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7222    if (pCtx->rax != UINT32_C(0x564d5868))  /* The VMware backdoor magic value ('VMXh'). */
7223 return false;
7224    if (pCtx->dx != UINT32_C(0x5658))       /* The VMware backdoor I/O port ('VX'). */
7225 return false;
7226
7227 /* Flat ring-3 CS. */
7228 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7229 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7230 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7231 if (pCtx->cs.Attr.n.u2Dpl != 3)
7232 return false;
7233 if (pCtx->cs.u64Base != 0)
7234 return false;
7235
7236 /* Check opcode. */
7237 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7238 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7239 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7240 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7241 if (RT_FAILURE(rc))
7242 return false;
7243 if (abInstr[0] != 0xed)
7244 return false;
7245
7246 return true;
7247}
7248
7249
7250/**
7251 * VM-exit exception handler for \#GP (General-protection exception).
7252 *
7253 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7254 */
7255static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7256{
7257 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7258 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7259
7260 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7261 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7262#ifndef IN_NEM_DARWIN
7263 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7264 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7265 { /* likely */ }
7266 else
7267#endif
7268 {
7269#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7270# ifndef IN_NEM_DARWIN
7271 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7272# else
7273 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7274# endif
7275#endif
7276 /*
7277 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7278 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7279 */
7280 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7281 AssertRCReturn(rc, rc);
7282 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7283 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7284
7285 if ( pVmxTransient->fIsNestedGuest
7286 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7287 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7288 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7289 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7290 else
7291 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7292 return rc;
7293 }
7294
7295#ifndef IN_NEM_DARWIN
7296 Assert(CPUMIsGuestInRealModeEx(pCtx));
7297 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7298 Assert(!pVmxTransient->fIsNestedGuest);
7299
7300 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7301 AssertRCReturn(rc, rc);
7302
7303 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7304 if (rcStrict == VINF_SUCCESS)
7305 {
7306 if (!CPUMIsGuestInRealModeEx(pCtx))
7307 {
7308 /*
7309 * The guest is no longer in real-mode, check if we can continue executing the
7310 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7311 */
7312 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7313 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7314 {
7315 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7316 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7317 }
7318 else
7319 {
7320 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7321 rcStrict = VINF_EM_RESCHEDULE;
7322 }
7323 }
7324 else
7325 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7326 }
7327 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7328 {
7329 rcStrict = VINF_SUCCESS;
7330 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7331 }
7332 return VBOXSTRICTRC_VAL(rcStrict);
7333#endif
7334}
7335
7336
7337/**
7338 * VM-exit exception handler for \#DE (Divide Error).
7339 *
7340 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7341 */
7342static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7343{
7344 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7345 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7346
7347 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7348 AssertRCReturn(rc, rc);
7349
7350 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7351 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7352 {
7353 uint8_t cbInstr = 0;
7354 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7355 if (rc2 == VINF_SUCCESS)
7356 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7357 else if (rc2 == VERR_NOT_FOUND)
7358 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7359 else
7360 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7361 }
7362 else
7363 rcStrict = VINF_SUCCESS; /* Do nothing. */
7364
7365 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7366 if (RT_FAILURE(rcStrict))
7367 {
7368 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7369 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7370 rcStrict = VINF_SUCCESS;
7371 }
7372
7373 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7374 return VBOXSTRICTRC_VAL(rcStrict);
7375}
7376
7377
7378/**
7379 * VM-exit exception handler wrapper for all other exceptions that are not handled
7380 * by a specific handler.
7381 *
7382 * This simply re-injects the exception back into the VM without any special
7383 * processing.
7384 *
7385 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7386 */
7387static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7388{
7389 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7390
7391#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7392# ifndef IN_NEM_DARWIN
7393 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7394 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7395 ("uVector=%#x u32XcptBitmap=%#X32\n",
7396 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7397 NOREF(pVmcsInfo);
7398# endif
7399#endif
7400
7401 /*
7402 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7403 * would have been handled while checking exits due to event delivery.
7404 */
7405 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7406
7407#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7408 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7409 AssertRCReturn(rc, rc);
7410 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7411#endif
7412
7413#ifdef VBOX_WITH_STATISTICS
7414 switch (uVector)
7415 {
7416 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7417 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7418 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7419 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7420 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7421 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7422 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7423 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7424 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7425 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7426 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7427 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7428 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7429 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7430 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7431 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7432 default:
7433 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7434 break;
7435 }
7436#endif
7437
7438 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7439 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7440 NOREF(uVector);
7441
7442 /* Re-inject the original exception into the guest. */
7443 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7444 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7445 return VINF_SUCCESS;
7446}
7447
7448
7449/**
7450 * VM-exit exception handler for all exceptions (except NMIs!).
7451 *
7452 * @remarks This may be called for both guests and nested-guests. Take care to not
7453 * make assumptions and avoid doing anything that is not relevant when
7454 * executing a nested-guest (e.g., Mesa driver hacks).
7455 */
7456static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7457{
7458 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7459
7460 /*
7461 * If this VM-exit occurred while delivering an event through the guest IDT, take
7462 * action based on the return code and additional hints (e.g. for page-faults)
7463 * that will be updated in the VMX transient structure.
7464 */
7465 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7466 if (rcStrict == VINF_SUCCESS)
7467 {
7468 /*
7469 * If an exception caused a VM-exit due to delivery of an event, the original
7470 * event may have to be re-injected into the guest. We shall reinject it and
7471 * continue guest execution. However, page-fault is a complicated case and
7472 * needs additional processing done in vmxHCExitXcptPF().
7473 */
7474 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7475 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7476 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7477 || uVector == X86_XCPT_PF)
7478 {
7479 switch (uVector)
7480 {
7481 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7482 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7483 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7484 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7485 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7486 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7487 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7488 default:
7489 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7490 }
7491 }
7492 /* else: inject pending event before resuming guest execution. */
7493 }
7494 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7495 {
7496 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7497 rcStrict = VINF_SUCCESS;
7498 }
7499
7500 return rcStrict;
7501}
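
#if 0 /* Illustrative, compiled-out sketch - not part of the original file. */
/* The dispatch rule applied above, restated: if this VM-exit interrupted the delivery of a
   prior event, that event is simply re-injected and the new exception gets no dedicated
   handler - except for #PF, which always needs the extra processing in vmxHCExitXcptPF().
   The helper name is made up. */
DECLINLINE(bool) vmxSketchShouldInvokeXcptHandler(bool fPriorEventPending, uint8_t uVector)
{
    return !fPriorEventPending || uVector == X86_XCPT_PF;
}
#endif
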
7502/** @} */
7503
7504
7505/** @name VM-exit handlers.
7506 * @{
7507 */
7508/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7509/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7510/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7511
7512/**
7513 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7514 */
7515HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7516{
7517 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7518 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7519
7520#ifndef IN_NEM_DARWIN
7521 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7522 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7523 return VINF_SUCCESS;
7524 return VINF_EM_RAW_INTERRUPT;
7525#else
7526 return VINF_SUCCESS;
7527#endif
7528}
7529
7530
7531/**
7532 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7533 * VM-exit.
7534 */
7535HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7536{
7537 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7538 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7539
7540 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7541
7542 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7543 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7544 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7545
7546 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7547 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7548 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7549 NOREF(pVmcsInfo);
7550
7551 VBOXSTRICTRC rcStrict;
7552 switch (uExitIntType)
7553 {
7554#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7555 /*
7556 * Host physical NMIs:
7557 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7558 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7559 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7560 *
7561 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7562 * See Intel spec. 27.5.5 "Updating Non-Register State".
7563 */
7564 case VMX_EXIT_INT_INFO_TYPE_NMI:
7565 {
7566 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7567 break;
7568 }
7569#endif
7570
7571 /*
7572 * Privileged software exceptions (#DB from ICEBP),
7573 * Software exceptions (#BP and #OF),
7574 * Hardware exceptions:
7575 * Process the required exceptions and resume guest execution if possible.
7576 */
7577 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7578 Assert(uVector == X86_XCPT_DB);
7579 RT_FALL_THRU();
7580 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7581 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7582 RT_FALL_THRU();
7583 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7584 {
7585 NOREF(uVector);
7586 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7587 | HMVMX_READ_EXIT_INSTR_LEN
7588 | HMVMX_READ_IDT_VECTORING_INFO
7589 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7590 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7591 break;
7592 }
7593
7594 default:
7595 {
7596 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7597 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7598 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7599 break;
7600 }
7601 }
7602
7603 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7604 return rcStrict;
7605}
7606
7607
7608/**
7609 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7610 */
7611HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7612{
7613 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7614
7615    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7616 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7617 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7618
7619 /* Evaluate and deliver pending events and resume guest execution. */
7620 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7621 return VINF_SUCCESS;
7622}
7623
7624
7625/**
7626 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7627 */
7628HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7629{
7630 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7631
7632 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7633 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7634 {
7635 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7636 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7637 }
7638
7639 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7640
7641 /*
7642 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7643 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7644 */
7645 uint32_t fIntrState;
7646 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7647 AssertRC(rc);
7648 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7649 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7650 {
7651 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7652
7653 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7654 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7655 AssertRC(rc);
7656 }
7657
7658    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7659 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7660
7661 /* Evaluate and deliver pending events and resume guest execution. */
7662 return VINF_SUCCESS;
7663}
7664
7665
7666/**
7667 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7668 */
7669HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7670{
7671 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7672 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7673}
7674
7675
7676/**
7677 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7678 */
7679HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7680{
7681 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7682 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7683}
7684
7685
7686/**
7687 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7688 */
7689HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7690{
7691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7692
7693 /*
7694 * Get the state we need and update the exit history entry.
7695 */
7696 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7697 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7698 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7699 AssertRCReturn(rc, rc);
7700
7701 VBOXSTRICTRC rcStrict;
7702 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7703 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7704 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7705 if (!pExitRec)
7706 {
7707 /*
7708 * Regular CPUID instruction execution.
7709 */
7710 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7711 if (rcStrict == VINF_SUCCESS)
7712 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7713 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7714 {
7715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7716 rcStrict = VINF_SUCCESS;
7717 }
7718 }
7719 else
7720 {
7721 /*
7722 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7723 */
7724 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7725 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7726 AssertRCReturn(rc2, rc2);
7727
7728 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7729 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7730
7731 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7733
7734 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7735 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7736 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7737 }
7738 return rcStrict;
7739}
7740
7741
7742/**
7743 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7744 */
7745HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7746{
7747 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7748
7749 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7750 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7751 AssertRCReturn(rc, rc);
7752
7753 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7754 return VINF_EM_RAW_EMULATE_INSTR;
7755
7756 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7757 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7758}
7759
7760
7761/**
7762 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7763 */
7764HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7765{
7766 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7767
7768 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7769 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7770 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7771 AssertRCReturn(rc, rc);
7772
7773 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7774 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7775 {
7776 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7777 we must reset offsetting on VM-entry. See @bugref{6634}. */
7778 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7779 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7780 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7781 }
7782 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7783 {
7784 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7785 rcStrict = VINF_SUCCESS;
7786 }
7787 return rcStrict;
7788}
7789
7790
7791/**
7792 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7793 */
7794HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7795{
7796 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7797
7798 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7799 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7800 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7801 AssertRCReturn(rc, rc);
7802
7803 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7804 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7805 {
7806 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7807 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7808 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7809 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7810 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7811 }
7812 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7813 {
7814 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7815 rcStrict = VINF_SUCCESS;
7816 }
7817 return rcStrict;
7818}
7819
7820
7821/**
7822 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7823 */
7824HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7825{
7826 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7827
7828 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7829 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7830 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7831 AssertRCReturn(rc, rc);
7832
7833 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7834 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7835 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7836 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7837 {
7838 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7839 rcStrict = VINF_SUCCESS;
7840 }
7841 return rcStrict;
7842}
7843
7844
7845/**
7846 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7847 */
7848HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7849{
7850 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7851
7852 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7853 if (EMAreHypercallInstructionsEnabled(pVCpu))
7854 {
7855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7856 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7857 | CPUMCTX_EXTRN_RFLAGS
7858 | CPUMCTX_EXTRN_CR0
7859 | CPUMCTX_EXTRN_SS
7860 | CPUMCTX_EXTRN_CS
7861 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7862 AssertRCReturn(rc, rc);
7863
7864 /* Perform the hypercall. */
7865 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7866 if (rcStrict == VINF_SUCCESS)
7867 {
7868 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7869 AssertRCReturn(rc, rc);
7870 }
7871 else
7872 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7873 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7874 || RT_FAILURE(rcStrict));
7875
7876 /* If the hypercall changes anything other than guest's general-purpose registers,
7877 we would need to reload the guest changed bits here before VM-entry. */
7878 }
7879 else
7880 Log4Func(("Hypercalls not enabled\n"));
7881
7882 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7883 if (RT_FAILURE(rcStrict))
7884 {
7885 vmxHCSetPendingXcptUD(pVCpu);
7886 rcStrict = VINF_SUCCESS;
7887 }
7888
7889 return rcStrict;
7890}
7891
7892
7893/**
7894 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7895 */
7896HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7897{
7898 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7899#ifndef IN_NEM_DARWIN
7900 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7901#endif
7902
7903 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7904 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7905 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7906 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7907 AssertRCReturn(rc, rc);
7908
7909 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7910
7911 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7912 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7913 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7914 {
7915 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7916 rcStrict = VINF_SUCCESS;
7917 }
7918 else
7919 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7920 VBOXSTRICTRC_VAL(rcStrict)));
7921 return rcStrict;
7922}
7923
7924
7925/**
7926 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7927 */
7928HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7929{
7930 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7931
7932 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7933 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7934 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7935 AssertRCReturn(rc, rc);
7936
7937 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7938 if (rcStrict == VINF_SUCCESS)
7939 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7940 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7941 {
7942 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7943 rcStrict = VINF_SUCCESS;
7944 }
7945
7946 return rcStrict;
7947}
7948
7949
7950/**
7951 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7952 */
7953HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7954{
7955 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7956
7957 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7958 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7959 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7960 AssertRCReturn(rc, rc);
7961
7962 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7963 if (RT_SUCCESS(rcStrict))
7964 {
7965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7966 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7967 rcStrict = VINF_SUCCESS;
7968 }
7969
7970 return rcStrict;
7971}
7972
7973
7974/**
7975 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7976 * VM-exit.
7977 */
7978HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7979{
7980 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7981 return VINF_EM_RESET;
7982}
7983
7984
7985/**
7986 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7987 */
7988HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7989{
7990 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7991
7992 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7993 AssertRCReturn(rc, rc);
7994
7995 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7996 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7997 rc = VINF_SUCCESS;
7998 else
7999 rc = VINF_EM_HALT;
8000
8001 if (rc != VINF_SUCCESS)
8002 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8003 return rc;
8004}
8005
8006
8007#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8008/**
8009 * VM-exit handler for instructions that result in a \#UD exception delivered to
8010 * the guest.
8011 */
8012HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8013{
8014 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8015 vmxHCSetPendingXcptUD(pVCpu);
8016 return VINF_SUCCESS;
8017}
8018#endif
8019
8020
8021/**
8022 * VM-exit handler for expiry of the VMX-preemption timer.
8023 */
8024HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8025{
8026 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8027
8028 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8029 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8030    Log12(("vmxHCExitPreemptTimer:\n"));
8031
8032 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8033 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8034 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8035 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8036 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8037}
8038
8039
8040/**
8041 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8042 */
8043HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8044{
8045 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8046
8047 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8048 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8049 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8050 AssertRCReturn(rc, rc);
8051
8052 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8053 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8054 : HM_CHANGED_RAISED_XCPT_MASK);
8055
8056#ifndef IN_NEM_DARWIN
8057 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8058 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8059 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8060 {
8061 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8062 hmR0VmxUpdateStartVmFunction(pVCpu);
8063 }
8064#endif
8065
8066 return rcStrict;
8067}
8068
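#if 0 /* Illustrative, compiled-out sketch - not part of the original file. */
/* The decision made above, restated: guest XCR0 only needs to be swapped in/out around
   VM-entry/VM-exit when the guest has enabled XSAVE (CR4.OSXSAVE) and its XCR0 differs from
   the host's current value.  The helper name is made up. */
DECLINLINE(bool) vmxSketchNeedGuestXcr0Swap(uint64_t uGuestCr4, uint64_t uGuestXcr0, uint64_t uHostXcr0)
{
    return (uGuestCr4 & X86_CR4_OSXSAVE)
        && uGuestXcr0 != uHostXcr0;
}
#endif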
8069
8070/**
8071 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8072 */
8073HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8074{
8075 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8076
8077    /** @todo Enable the new code after finding a reliable guest test-case. */
8078#if 1
8079 return VERR_EM_INTERPRETER;
8080#else
8081 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8082 | HMVMX_READ_EXIT_INSTR_INFO
8083 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8084 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8085 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8086 AssertRCReturn(rc, rc);
8087
8088 /* Paranoia. Ensure this has a memory operand. */
8089 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8090
8091 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8092 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8093 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8094 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8095
8096 RTGCPTR GCPtrDesc;
8097 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8098
8099 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8100 GCPtrDesc, uType);
8101 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8103 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8104 {
8105 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8106 rcStrict = VINF_SUCCESS;
8107 }
8108 return rcStrict;
8109#endif
8110}
8111
8112
8113/**
8114 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8115 * VM-exit.
8116 */
8117HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8118{
8119 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8120 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8121 AssertRCReturn(rc, rc);
8122
8123 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8124 if (RT_FAILURE(rc))
8125 return rc;
8126
8127 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8128 NOREF(uInvalidReason);
8129
8130#ifdef VBOX_STRICT
8131 uint32_t fIntrState;
8132 uint64_t u64Val;
8133 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8134 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8135 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8136
8137 Log4(("uInvalidReason %u\n", uInvalidReason));
8138 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8139 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8140 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8141
8142 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8143 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8144 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8145 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8146 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8147 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8148 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8149    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8150 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8151 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8152 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8153 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8154# ifndef IN_NEM_DARWIN
8155 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8156 {
8157 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8158 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8159 }
8160
8161 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8162# endif
8163#endif
8164
8165 return VERR_VMX_INVALID_GUEST_STATE;
8166}
8167
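#if 0 /* Illustrative, compiled-out sketch - not part of the original file. */
/* What the CR0/CR4 guest/host masks and read shadows dumped above mean: for bits that are set
   in the mask, a guest read of the control register returns the corresponding read-shadow bit,
   while bits that are clear in the mask are read directly from the register (see Intel spec.
   "Guest/Host Masks and Read Shadows for CR0 and CR4").  The helper name is made up. */
DECLINLINE(uint64_t) vmxSketchGuestVisibleCrX(uint64_t uRealCrX, uint64_t fGstHostMask, uint64_t uReadShadow)
{
    return (uRealCrX & ~fGstHostMask) | (uReadShadow & fGstHostMask);
}
#endif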
8168/**
8169 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8170 */
8171HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8172{
8173 /*
8174 * Cumulative notes of all recognized but unexpected VM-exits.
8175 *
8176 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8177 * nested-paging is used.
8178 *
8179     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8180 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8181 * this function (and thereby stop VM execution) for handling such instructions.
8182 *
8183 *
8184 * VMX_EXIT_INIT_SIGNAL:
8185 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8186     * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these
8187     * VM-exits. However, we should not receive INIT signal VM-exits while executing a VM.
8188     *
8189     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8190 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8191 * See Intel spec. "23.8 Restrictions on VMX operation".
8192 *
8193 * VMX_EXIT_SIPI:
8194 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8195 * activity state is used. We don't make use of it as our guests don't have direct
8196 * access to the host local APIC.
8197 *
8198 * See Intel spec. 25.3 "Other Causes of VM-exits".
8199 *
8200 * VMX_EXIT_IO_SMI:
8201 * VMX_EXIT_SMI:
8202 * This can only happen if we support dual-monitor treatment of SMI, which can be
8203 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8204 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8205 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8206 *
8207 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8208 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8209 *
8210 * VMX_EXIT_ERR_MSR_LOAD:
8211     * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8212     * and typically indicate a bug in the hypervisor code. We thus cannot resume guest
8213     * execution.
8214 *
8215 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8216 *
8217 * VMX_EXIT_ERR_MACHINE_CHECK:
8218     * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8219     * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8220     * abort-class #MC exception is raised. We thus cannot assume a reasonable chance of
8221     * continuing any sort of execution and we bail.
8222 *
8223 * See Intel spec. 15.1 "Machine-check Architecture".
8224 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8225 *
8226 * VMX_EXIT_PML_FULL:
8227 * VMX_EXIT_VIRTUALIZED_EOI:
8228 * VMX_EXIT_APIC_WRITE:
8229 * We do not currently support any of these features and thus they are all unexpected
8230 * VM-exits.
8231 *
8232 * VMX_EXIT_GDTR_IDTR_ACCESS:
8233 * VMX_EXIT_LDTR_TR_ACCESS:
8234 * VMX_EXIT_RDRAND:
8235 * VMX_EXIT_RSM:
8236 * VMX_EXIT_VMFUNC:
8237 * VMX_EXIT_ENCLS:
8238 * VMX_EXIT_RDSEED:
8239 * VMX_EXIT_XSAVES:
8240 * VMX_EXIT_XRSTORS:
8241 * VMX_EXIT_UMWAIT:
8242 * VMX_EXIT_TPAUSE:
8243 * VMX_EXIT_LOADIWKEY:
8244 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8245     * instruction. Any VM-exit for these instructions indicates a hardware problem,
8246     * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8247 *
8248 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8249 */
8250 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8251 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8252 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8253}
8254
8255
8256/**
8257 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8258 */
8259HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8260{
8261 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8262
8263 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8264
8265    /** @todo Optimize this: We currently drag in the whole MSR state
8266     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
8267     * MSRs actually required. That would require changes to IEM and possibly CPUM too.
8268     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp.) */
8269 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8270 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8271 int rc;
8272 switch (idMsr)
8273 {
8274 default:
8275 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8276 __FUNCTION__);
8277 AssertRCReturn(rc, rc);
8278 break;
8279 case MSR_K8_FS_BASE:
8280 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8281 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8282 AssertRCReturn(rc, rc);
8283 break;
8284 case MSR_K8_GS_BASE:
8285 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8286 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8287 AssertRCReturn(rc, rc);
8288 break;
8289 }
8290
8291 Log4Func(("ecx=%#RX32\n", idMsr));
8292
8293#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8294 Assert(!pVmxTransient->fIsNestedGuest);
8295 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8296 {
8297 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8298 && idMsr != MSR_K6_EFER)
8299 {
8300 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8301 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8302 }
8303 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8304 {
8305 Assert(pVmcsInfo->pvMsrBitmap);
8306 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8307 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8308 {
8309 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8310 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8311 }
8312 }
8313 }
8314#endif
8315
8316 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8317 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8318 if (rcStrict == VINF_SUCCESS)
8319 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8320 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8321 {
8322 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8323 rcStrict = VINF_SUCCESS;
8324 }
8325 else
8326 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8327 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8328
8329 return rcStrict;
8330}
8331
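#if 0 /* Illustrative, compiled-out sketch - not part of the original file. */
/* How the strict checks above (and in the WRMSR handler below) query the MSR bitmap:
   CPUMGetVmxMsrPermission() returns VMXMSRPM_ALLOW_RD/VMXMSRPM_ALLOW_WR bits indicating whether
   reads/writes of the MSR are passed through without causing a VM-exit.  The helper names are
   made up. */
DECLINLINE(bool) vmxSketchIsMsrReadPassedThru(void const *pvMsrBitmap, uint32_t idMsr)
{
    return (CPUMGetVmxMsrPermission(pvMsrBitmap, idMsr) & VMXMSRPM_ALLOW_RD) != 0;
}

DECLINLINE(bool) vmxSketchIsMsrWritePassedThru(void const *pvMsrBitmap, uint32_t idMsr)
{
    return (CPUMGetVmxMsrPermission(pvMsrBitmap, idMsr) & VMXMSRPM_ALLOW_WR) != 0;
}
#endif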
8332
8333/**
8334 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8335 */
8336HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8337{
8338 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8339
8340 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8341
8342 /*
8343     * The FS and GS base MSRs are not part of the CPUMCTX_EXTRN_ALL_MSRS mask used below.
8344     * Although we don't need to fetch the base (it will be overwritten shortly), when
8345     * loading the guest state we also load the entire segment register, including its
8346     * limit and attributes, and thus we need to import them here.
8347 */
8348    /** @todo Optimize this: We currently drag in the whole MSR state
8349     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only fetch the
8350     * MSRs actually required. That would require changes to IEM and possibly CPUM too.
8351     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp.) */
8352 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8353 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8354 int rc;
8355 switch (idMsr)
8356 {
8357 default:
8358 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8359 __FUNCTION__);
8360 AssertRCReturn(rc, rc);
8361 break;
8362
8363 case MSR_K8_FS_BASE:
8364 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8365 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8366 AssertRCReturn(rc, rc);
8367 break;
8368 case MSR_K8_GS_BASE:
8369 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8370 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8371 AssertRCReturn(rc, rc);
8372 break;
8373 }
8374 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8375
8376 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8377 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8378
8379 if (rcStrict == VINF_SUCCESS)
8380 {
8381 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8382
8383 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8384 if ( idMsr == MSR_IA32_APICBASE
8385 || ( idMsr >= MSR_IA32_X2APIC_START
8386 && idMsr <= MSR_IA32_X2APIC_END))
8387 {
8388 /*
8389 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8390 * When full APIC register virtualization is implemented we'll have to make
8391 * sure APIC state is saved from the VMCS before IEM changes it.
8392 */
8393 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8394 }
8395 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8396 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8397 else if (idMsr == MSR_K6_EFER)
8398 {
8399 /*
8400 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8401 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8402 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8403 */
8404 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8405 }
8406
8407 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8408 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8409 {
8410 switch (idMsr)
8411 {
8412 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8413 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8414 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8415 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8416 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8417 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8418 default:
8419 {
8420#ifndef IN_NEM_DARWIN
8421 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8422 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8423 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8424 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8425#else
8426 AssertMsgFailed(("TODO\n"));
8427#endif
8428 break;
8429 }
8430 }
8431 }
8432#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8433 else
8434 {
8435 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8436 switch (idMsr)
8437 {
8438 case MSR_IA32_SYSENTER_CS:
8439 case MSR_IA32_SYSENTER_EIP:
8440 case MSR_IA32_SYSENTER_ESP:
8441 case MSR_K8_FS_BASE:
8442 case MSR_K8_GS_BASE:
8443 {
8444 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8445 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8446 }
8447
8448 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8449 default:
8450 {
8451 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8452 {
8453 /* EFER MSR writes are always intercepted. */
8454 if (idMsr != MSR_K6_EFER)
8455 {
8456 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8457 idMsr));
8458 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8459 }
8460 }
8461
8462 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8463 {
8464 Assert(pVmcsInfo->pvMsrBitmap);
8465 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8466 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8467 {
8468 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8469 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8470 }
8471 }
8472 break;
8473 }
8474 }
8475 }
8476#endif /* VBOX_STRICT */
8477 }
8478 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8479 {
8480 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8481 rcStrict = VINF_SUCCESS;
8482 }
8483 else
8484 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8485 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8486
8487 return rcStrict;
8488}
8489
8490
8491/**
8492 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8493 */
8494HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8495{
8496 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8497
8498    /** @todo The guest has likely hit a contended spinlock. We might want to
8499     *        poke/schedule a different guest VCPU. */
8500 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8501 if (RT_SUCCESS(rc))
8502 return VINF_EM_RAW_INTERRUPT;
8503
8504 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8505 return rc;
8506}
8507
8508
8509/**
8510 * VM-exit handler for when the TPR value is lowered below the specified
8511 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8512 */
8513HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8514{
8515 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8516 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8517
8518 /*
8519 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8520 * We'll re-evaluate pending interrupts and inject them before the next VM
8521 * entry so we can just continue execution here.
8522 */
8523 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8524 return VINF_SUCCESS;
8525}
8526
8527
8528/**
8529 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8530 * VM-exit.
8531 *
8532 * @retval VINF_SUCCESS when guest execution can continue.
8533 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8534 * @retval VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8535 * incompatible guest state for VMX execution (real-on-v86 case).
8536 */
8537HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8538{
8539 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8540 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8541
8542 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8543 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8544 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8545
8546 VBOXSTRICTRC rcStrict;
8547 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8548 uint64_t const uExitQual = pVmxTransient->uExitQual;
8549 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
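    /* The access type in the exit qualification encodes 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW.
       See Intel spec. "Exit Qualification for Control-Register Accesses". */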
8550 switch (uAccessType)
8551 {
8552 /*
8553 * MOV to CRx.
8554 */
8555 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8556 {
8557 /*
8558 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8559 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8560 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8561 * PAE PDPTEs as well.
8562 */
8563 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8564 AssertRCReturn(rc, rc);
8565
8566 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8567#ifndef IN_NEM_DARWIN
8568 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8569#endif
8570 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8571 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8572
8573 /*
8574             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8575 * - When nested paging isn't used.
8576 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8577 * - We are executing in the VM debug loop.
8578 */
8579#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8580# ifndef IN_NEM_DARWIN
8581 Assert( iCrReg != 3
8582 || !VM_IS_VMX_NESTED_PAGING(pVM)
8583 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8584 || pVCpu->hmr0.s.fUsingDebugLoop);
8585# else
8586 Assert( iCrReg != 3
8587 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8588# endif
8589#endif
8590
8591 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8592 Assert( iCrReg != 8
8593 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8594
8595 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8596 AssertMsg( rcStrict == VINF_SUCCESS
8597 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8598
8599#ifndef IN_NEM_DARWIN
8600 /*
8601 * This is a kludge for handling switches back to real mode when we try to use
8602 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8603 * deal with special selector values, so we have to return to ring-3 and run
8604 * there till the selector values are V86 mode compatible.
8605 *
8606 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8607 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8608 * this function.
8609 */
8610 if ( iCrReg == 0
8611 && rcStrict == VINF_SUCCESS
8612 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8613 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8614 && (uOldCr0 & X86_CR0_PE)
8615 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8616 {
8617 /** @todo Check selectors rather than returning all the time. */
8618 Assert(!pVmxTransient->fIsNestedGuest);
8619 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8620 rcStrict = VINF_EM_RESCHEDULE_REM;
8621 }
8622#endif
8623
8624 break;
8625 }
8626
8627 /*
8628 * MOV from CRx.
8629 */
8630 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8631 {
8632 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8633 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8634
8635 /*
8636             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8637 * - When nested paging isn't used.
8638 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8639 * - We are executing in the VM debug loop.
8640 */
8641#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8642# ifndef IN_NEM_DARWIN
8643 Assert( iCrReg != 3
8644 || !VM_IS_VMX_NESTED_PAGING(pVM)
8645 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8646 || pVCpu->hmr0.s.fLeaveDone);
8647# else
8648 Assert( iCrReg != 3
8649 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8650# endif
8651#endif
8652
8653 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8654 Assert( iCrReg != 8
8655 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8656
8657 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8658 break;
8659 }
8660
8661 /*
8662 * CLTS (Clear Task-Switch Flag in CR0).
8663 */
8664 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8665 {
8666 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8667 break;
8668 }
8669
8670 /*
8671 * LMSW (Load Machine-Status Word into CR0).
8672 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8673 */
8674 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8675 {
8676 RTGCPTR GCPtrEffDst;
8677 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8678 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8679 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8680 if (fMemOperand)
8681 {
8682 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8683 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8684 }
8685 else
8686 GCPtrEffDst = NIL_RTGCPTR;
8687 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8688 break;
8689 }
8690
8691 default:
8692 {
8693 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8694 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8695 }
8696 }
8697
8698 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8699 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8700 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8701
8702 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8703 NOREF(pVM);
8704 return rcStrict;
8705}
8706
8707
8708/**
8709 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8710 * VM-exit.
8711 */
8712HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8713{
8714 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8715 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8716
8717 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8718 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8719 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8720 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8721#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8722    /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8723 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8724 AssertRCReturn(rc, rc);
8725
8726    /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8727 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8728 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8729 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8730 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8731 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8732 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8733 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8734
8735 /*
8736 * Update exit history to see if this exit can be optimized.
8737 */
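    /* Note: the exit-history optimization is skipped while the guest (EFLAGS.TF) or the debugger is
       single-stepping, likely because EMHistoryExec may execute more than one instruction. */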
8738 VBOXSTRICTRC rcStrict;
8739 PCEMEXITREC pExitRec = NULL;
8740 if ( !fGstStepping
8741 && !fDbgStepping)
8742 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8743 !fIOString
8744 ? !fIOWrite
8745 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8746 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8747 : !fIOWrite
8748 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8749 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8750 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8751 if (!pExitRec)
8752 {
8753 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8754 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
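        /* The I/O size field of the exit qualification encodes 0=1 byte, 1=2 bytes, 3=4 bytes; the value 2 is
           undefined, hence the unused index 2 in the tables above and the uIOSize assertion earlier. */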
8755
8756 uint32_t const cbValue = s_aIOSizes[uIOSize];
8757 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8758 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8759 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8760 if (fIOString)
8761 {
8762 /*
8763 * INS/OUTS - I/O String instruction.
8764 *
8765 * Use instruction-information if available, otherwise fall back on
8766 * interpreting the instruction.
8767 */
8768 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8769 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8770 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8771 if (fInsOutsInfo)
8772 {
8773 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8774 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8775 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8776 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8777 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8778 if (fIOWrite)
8779 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8780 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8781 else
8782 {
8783 /*
8784 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8785                 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8786 * See Intel Instruction spec. for "INS".
8787 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8788 */
8789 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8790 }
8791 }
8792 else
8793 rcStrict = IEMExecOne(pVCpu);
8794
8795 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8796 fUpdateRipAlready = true;
8797 }
8798 else
8799 {
8800 /*
8801 * IN/OUT - I/O instruction.
8802 */
8803 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8804 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8805 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8806 if (fIOWrite)
8807 {
8808 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8809 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8810#ifndef IN_NEM_DARWIN
8811 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8812 && !pCtx->eflags.Bits.u1TF)
8813 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8814#endif
8815 }
8816 else
8817 {
8818 uint32_t u32Result = 0;
8819 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8820 if (IOM_SUCCESS(rcStrict))
8821 {
8822 /* Save result of I/O IN instr. in AL/AX/EAX. */
8823 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8824 }
8825#ifndef IN_NEM_DARWIN
8826 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8827 && !pCtx->eflags.Bits.u1TF)
8828 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8829#endif
8830 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8831 }
8832 }
8833
8834 if (IOM_SUCCESS(rcStrict))
8835 {
8836 if (!fUpdateRipAlready)
8837 {
8838 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8839 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8840 }
8841
8842 /*
8843         * INS/OUTS with a REP prefix updates RFLAGS; not syncing it back can be observed as a
8844         * triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8845 *
8846 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8847 */
8848 if (fIOString)
8849 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8850
8851 /*
8852 * If any I/O breakpoints are armed, we need to check if one triggered
8853 * and take appropriate action.
8854 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8855 */
8856#if 1
8857 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8858#else
8859 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8860 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8861 AssertRCReturn(rc, rc);
8862#endif
8863
8864 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8865 * execution engines about whether hyper BPs and such are pending. */
8866 uint32_t const uDr7 = pCtx->dr[7];
8867 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8868 && X86_DR7_ANY_RW_IO(uDr7)
8869 && (pCtx->cr4 & X86_CR4_DE))
8870 || DBGFBpIsHwIoArmed(pVM)))
8871 {
8872 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8873
8874#ifndef IN_NEM_DARWIN
8875 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8876 VMMRZCallRing3Disable(pVCpu);
8877 HM_DISABLE_PREEMPT(pVCpu);
8878
8879 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8880
8881 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8882 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8883 {
8884 /* Raise #DB. */
8885 if (fIsGuestDbgActive)
8886 ASMSetDR6(pCtx->dr[6]);
8887 if (pCtx->dr[7] != uDr7)
8888 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8889
8890 vmxHCSetPendingXcptDB(pVCpu);
8891 }
8892 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8893 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8894 else if ( rcStrict2 != VINF_SUCCESS
8895 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8896 rcStrict = rcStrict2;
8897 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8898
8899 HM_RESTORE_PREEMPT();
8900 VMMRZCallRing3Enable(pVCpu);
8901#else
8902 /** @todo */
8903#endif
8904 }
8905 }
8906
8907#ifdef VBOX_STRICT
8908 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8909 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8910 Assert(!fIOWrite);
8911 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8912 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8913 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8914 Assert(fIOWrite);
8915 else
8916 {
8917# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8918       * statuses that the VMM device and some others may return. See
8919 * IOM_SUCCESS() for guidance. */
8920 AssertMsg( RT_FAILURE(rcStrict)
8921 || rcStrict == VINF_SUCCESS
8922 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8923 || rcStrict == VINF_EM_DBG_BREAKPOINT
8924 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8925 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8926# endif
8927 }
8928#endif
8929 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8930 }
8931 else
8932 {
8933 /*
8934 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8935 */
8936 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8937 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8938 AssertRCReturn(rc2, rc2);
8939 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8940 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8941 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8942 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8943 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8944 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8945
8946 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8947 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8948
8949 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8950 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8951 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8952 }
8953 return rcStrict;
8954}
8955
8956
8957/**
8958 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8959 * VM-exit.
8960 */
8961HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8962{
8963 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8964
8965    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8966 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8967 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8968 {
8969 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8970 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8971 {
8972 uint32_t uErrCode;
8973 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8974 {
8975 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8976 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8977 }
8978 else
8979 uErrCode = 0;
8980
8981 RTGCUINTPTR GCPtrFaultAddress;
8982 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8983 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8984 else
8985 GCPtrFaultAddress = 0;
8986
8987 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8988
8989 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8990 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8991
8992 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8993 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8994 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8995 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8996 }
8997 }
8998
8999 /* Fall back to the interpreter to emulate the task-switch. */
9000 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9001 return VERR_EM_INTERPRETER;
9002}
9003
9004
9005/**
9006 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9007 */
9008HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9009{
9010 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9011
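    /* The monitor-trap flag causes a VM-exit after executing a single guest instruction; it is typically
       armed by the single-stepping/debug code, so simply disarm it here and report the completed step. */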
9012 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9013 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9014 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9015 AssertRC(rc);
9016 return VINF_EM_DBG_STEPPED;
9017}
9018
9019
9020/**
9021 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9022 */
9023HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9024{
9025 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9026 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9027
9028 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9029 | HMVMX_READ_EXIT_INSTR_LEN
9030 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9031 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9032 | HMVMX_READ_IDT_VECTORING_INFO
9033 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9034
9035 /*
9036 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9037 */
9038 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9039 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9040 {
9041        /* If, for some crazy guest, event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9042 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9043 {
9044 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9045 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9046 }
9047 }
9048 else
9049 {
9050 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9051 return rcStrict;
9052 }
9053
9054    /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9055 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9056 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9057 AssertRCReturn(rc, rc);
9058
9059    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9060 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9061 switch (uAccessType)
9062 {
9063#ifndef IN_NEM_DARWIN
9064 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9065 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9066 {
9067 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9068 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9069 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9070
9071 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9072 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9073 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9074 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9075 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9076
9077 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9078 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9079 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9080 if ( rcStrict == VINF_SUCCESS
9081 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9082 || rcStrict == VERR_PAGE_NOT_PRESENT)
9083 {
9084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9085 | HM_CHANGED_GUEST_APIC_TPR);
9086 rcStrict = VINF_SUCCESS;
9087 }
9088 break;
9089 }
9090#else
9091 /** @todo */
9092#endif
9093
9094 default:
9095 {
9096 Log4Func(("uAccessType=%#x\n", uAccessType));
9097 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9098 break;
9099 }
9100 }
9101
9102 if (rcStrict != VINF_SUCCESS)
9103 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9104 return rcStrict;
9105}
9106
9107
9108/**
9109 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9110 * VM-exit.
9111 */
9112HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9113{
9114 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9115 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9116
9117 /*
9118 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9119 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9120 * must emulate the MOV DRx access.
9121 */
9122 if (!pVmxTransient->fIsNestedGuest)
9123 {
9124 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9125 if (pVmxTransient->fWasGuestDebugStateActive)
9126 {
9127 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9128 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9129 }
9130
9131 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9132 && !pVmxTransient->fWasHyperDebugStateActive)
9133 {
9134 Assert(!DBGFIsStepping(pVCpu));
9135 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9136
9137 /* Don't intercept MOV DRx any more. */
9138 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9139 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9140 AssertRC(rc);
9141
9142#ifndef IN_NEM_DARWIN
9143 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9144 VMMRZCallRing3Disable(pVCpu);
9145 HM_DISABLE_PREEMPT(pVCpu);
9146
9147 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9148 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9149 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9150
9151 HM_RESTORE_PREEMPT();
9152 VMMRZCallRing3Enable(pVCpu);
9153#else
9154 CPUMR3NemActivateGuestDebugState(pVCpu);
9155 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9156 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9157#endif
9158
9159#ifdef VBOX_WITH_STATISTICS
9160 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9161 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9162 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9163 else
9164 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9165#endif
9166 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9167 return VINF_SUCCESS;
9168 }
9169 }
9170
9171 /*
9172     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
9173 * The EFER MSR is always up-to-date.
9174 * Update the segment registers and DR7 from the CPU.
9175 */
9176 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9177 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9178 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9179 AssertRCReturn(rc, rc);
9180 Log4Func(("cs:rip=%#04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
9181
9182 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9183 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9184 {
9185 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9186 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
9187 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
9188 if (RT_SUCCESS(rc))
9189 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
9190 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9191 }
9192 else
9193 {
9194 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
9195 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
9196 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
9197 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9198 }
9199
9200 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9201 if (RT_SUCCESS(rc))
9202 {
9203 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
9204 AssertRCReturn(rc2, rc2);
9205 return VINF_SUCCESS;
9206 }
9207 return rc;
9208}
9209
9210
9211/**
9212 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9213 * Conditional VM-exit.
9214 */
9215HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9216{
9217 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9218
9219#ifndef IN_NEM_DARWIN
9220 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9221
9222 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9223 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9224 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9225 | HMVMX_READ_IDT_VECTORING_INFO
9226 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9227 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9228
9229 /*
9230 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9231 */
9232 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9233 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9234 {
9235 /*
9236 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9237 * instruction emulation to inject the original event. Otherwise, injecting the original event
9238 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9239 */
9240 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9241 { /* likely */ }
9242 else
9243 {
9244 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9245# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9246 /** @todo NSTVMX: Think about how this should be handled. */
9247 if (pVmxTransient->fIsNestedGuest)
9248 return VERR_VMX_IPE_3;
9249# endif
9250 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9251 }
9252 }
9253 else
9254 {
9255 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9256 return rcStrict;
9257 }
9258
9259 /*
9260 * Get sufficient state and update the exit history entry.
9261 */
9262 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9263 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9264 AssertRCReturn(rc, rc);
9265
9266 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9267 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9268 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9269 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9270 if (!pExitRec)
9271 {
9272 /*
9273 * If we succeed, resume guest execution.
9274 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9275 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9276 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9277 * weird case. See @bugref{6043}.
9278 */
9279 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9280/** @todo bird: We can probably just go straight to IOM here and assume that
9281 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9282 * well. However, we need to address the aliasing workarounds that
9283 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9284 *
9285 * Might also be interesting to see if we can get this done more or
9286 * less locklessly inside IOM. Need to consider the lookup table
9287 * updating and use a bit more carefully first (or do all updates via
9288 * rendezvous) */
9289 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9290 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9291 if ( rcStrict == VINF_SUCCESS
9292 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9293 || rcStrict == VERR_PAGE_NOT_PRESENT)
9294 {
9295 /* Successfully handled MMIO operation. */
9296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9297 | HM_CHANGED_GUEST_APIC_TPR);
9298 rcStrict = VINF_SUCCESS;
9299 }
9300 }
9301 else
9302 {
9303 /*
9304 * Frequent exit or something needing probing. Call EMHistoryExec.
9305 */
9306 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9307 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9308
9309 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9311
9312 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9313 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9314 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9315 }
9316 return rcStrict;
9317#else
9318 AssertFailed();
9319 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9320#endif
9321}
9322
9323
9324/**
9325 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9326 * VM-exit.
9327 */
9328HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9329{
9330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9331#ifndef IN_NEM_DARWIN
9332 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9333
9334 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9335 | HMVMX_READ_EXIT_INSTR_LEN
9336 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9337 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9338 | HMVMX_READ_IDT_VECTORING_INFO
9339 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9340 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9341
9342 /*
9343 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9344 */
9345 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9346 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9347 {
9348 /*
9349 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9350 * we shall resolve the nested #PF and re-inject the original event.
9351 */
9352 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9353 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9354 }
9355 else
9356 {
9357 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9358 return rcStrict;
9359 }
9360
9361 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9362 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9363 AssertRCReturn(rc, rc);
9364
9365 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9366 uint64_t const uExitQual = pVmxTransient->uExitQual;
9367 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9368
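    /* Assemble a #PF-style error code from the EPT-violation exit qualification: bits 0-2 give the access
       type (read/write/instruction fetch), while bits 3-5 reflect the EPT entry permissions; if any of the
       latter are set, the translation was present and this is a permission violation rather than a
       not-present fault. */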
9369 RTGCUINT uErrorCode = 0;
9370 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9371 uErrorCode |= X86_TRAP_PF_ID;
9372 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9373 uErrorCode |= X86_TRAP_PF_RW;
9374 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9375 uErrorCode |= X86_TRAP_PF_P;
9376
9377 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9378 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9379
9380 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9381
9382 /*
9383 * Handle the pagefault trap for the nested shadow table.
9384 */
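    /* The fault is asserted in TRPM presumably so that PGM can query its details; it is reset again right
       after, since no exception is actually injected here. */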
9385 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9386 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9387 TRPMResetTrap(pVCpu);
9388
9389 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9390 if ( rcStrict == VINF_SUCCESS
9391 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9392 || rcStrict == VERR_PAGE_NOT_PRESENT)
9393 {
9394 /* Successfully synced our nested page tables. */
9395 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9397 return VINF_SUCCESS;
9398 }
9399 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9400 return rcStrict;
9401
9402#else /* IN_NEM_DARWIN */
9403 PVM pVM = pVCpu->CTX_SUFF(pVM);
9404 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9405 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9406 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9407 vmxHCImportGuestRip(pVCpu);
9408 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9409
9410 /*
9411 * Ask PGM for information about the given GCPhys. We need to check if we're
9412 * out of sync first.
9413 */
9414 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9415 false,
9416 false };
9417 PGMPHYSNEMPAGEINFO Info;
9418 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9419 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9420 if (RT_SUCCESS(rc))
9421 {
9422 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9423 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9424 {
9425 if (State.fCanResume)
9426 {
9427 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9428 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9429 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9430 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9431 State.fDidSomething ? "" : " no-change"));
9432 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9433 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9434 return VINF_SUCCESS;
9435 }
9436 }
9437
9438 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9439 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9440 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9441 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9442 State.fDidSomething ? "" : " no-change"));
9443 }
9444 else
9445 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9446 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9447 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9448
9449 /*
9450 * Emulate the memory access, either access handler or special memory.
9451 */
9452 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9453 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9454 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9455 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9456 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9457
9458 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9459 AssertRCReturn(rc, rc);
9460
9461 VBOXSTRICTRC rcStrict;
9462 if (!pExitRec)
9463 rcStrict = IEMExecOne(pVCpu);
9464 else
9465 {
9466 /* Frequent access or probing. */
9467 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9468 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9469 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9470 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9471 }
9472
9473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9474
9475 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9476 return rcStrict;
9477#endif /* IN_NEM_DARWIN */
9478}
9479
9480#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9481
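/*
 * The VMX-instruction VM-exit handlers below all follow the same pattern: read the relevant exit
 * qualification / instruction information / instruction length fields into the transient structure,
 * import the minimum guest state IEM needs, decode the memory operand where applicable, and hand the
 * already-decoded instruction to IEM. HM_CHANGED_* flags are set afterwards so that the modified state
 * is exported on the next VM-entry.
 */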
9482/**
9483 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9484 */
9485HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9486{
9487 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9488
9489 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9490 | HMVMX_READ_EXIT_INSTR_INFO
9491 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9492 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9493 | CPUMCTX_EXTRN_SREG_MASK
9494 | CPUMCTX_EXTRN_HWVIRT
9495 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9496 AssertRCReturn(rc, rc);
9497
9498 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9499
9500 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9501 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9502
9503 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9504 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9505 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9506 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9507 {
9508 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9509 rcStrict = VINF_SUCCESS;
9510 }
9511 return rcStrict;
9512}
9513
9514
9515/**
9516 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9517 */
9518HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9519{
9520 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9521
9522 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9523 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9524 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9525 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9526 AssertRCReturn(rc, rc);
9527
9528 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9529
9530 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9531 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9532 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9533 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9534 {
9535 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9536 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9537 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9538 }
9539 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9540 return rcStrict;
9541}
9542
9543
9544/**
9545 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9546 */
9547HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9548{
9549 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9550
9551 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9552 | HMVMX_READ_EXIT_INSTR_INFO
9553 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9554 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9555 | CPUMCTX_EXTRN_SREG_MASK
9556 | CPUMCTX_EXTRN_HWVIRT
9557 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9558 AssertRCReturn(rc, rc);
9559
9560 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9561
9562 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9563 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9564
9565 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9566 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9567 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9568 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9569 {
9570 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9571 rcStrict = VINF_SUCCESS;
9572 }
9573 return rcStrict;
9574}
9575
9576
9577/**
9578 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9579 */
9580HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9581{
9582 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9583
9584 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9585 | HMVMX_READ_EXIT_INSTR_INFO
9586 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9587 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9588 | CPUMCTX_EXTRN_SREG_MASK
9589 | CPUMCTX_EXTRN_HWVIRT
9590 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9591 AssertRCReturn(rc, rc);
9592
9593 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9594
9595 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9596 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9597
9598 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9599 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9600 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9601 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9602 {
9603 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9604 rcStrict = VINF_SUCCESS;
9605 }
9606 return rcStrict;
9607}
9608
9609
9610/**
9611 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9612 */
9613HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9614{
9615 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9616
9617 /*
9618     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and thus
9619     * might not need to import the shadow VMCS state, but it's safer to do so in case code
9620     * elsewhere dares to look at unsynced VMCS fields.
9621 */
9622 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9623 | HMVMX_READ_EXIT_INSTR_INFO
9624 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9625 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9626 | CPUMCTX_EXTRN_SREG_MASK
9627 | CPUMCTX_EXTRN_HWVIRT
9628 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9629 AssertRCReturn(rc, rc);
9630
9631 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9632
9633 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9634 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9635 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9636
9637 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9638 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9639 {
9640 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9641
9642# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9643 /* Try for exit optimization. This is on the following instruction
9644 because it would be a waste of time to have to reinterpret the
9645           already decoded vmread instruction. */
9646 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9647 if (pExitRec)
9648 {
9649 /* Frequent access or probing. */
9650 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9651 AssertRCReturn(rc, rc);
9652
9653 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9654 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9655 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9656 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9657 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9658 }
9659# endif
9660 }
9661 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9662 {
9663 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9664 rcStrict = VINF_SUCCESS;
9665 }
9666 return rcStrict;
9667}
9668
9669
9670/**
9671 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9672 */
9673HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9674{
9675 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9676
9677 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9678 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9679 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9680 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9681 AssertRCReturn(rc, rc);
9682
9683 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9684
9685 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9686 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9687 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9688 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9689 {
9690 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9691 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9692 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9693 }
9694 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9695 return rcStrict;
9696}
9697
9698
9699/**
9700 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9701 */
9702HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9703{
9704 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9705
9706 /*
9707 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9708 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9709 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9710 */
9711 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9712 | HMVMX_READ_EXIT_INSTR_INFO
9713 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9714 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9715 | CPUMCTX_EXTRN_SREG_MASK
9716 | CPUMCTX_EXTRN_HWVIRT
9717 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9718 AssertRCReturn(rc, rc);
9719
9720 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9721
9722 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9723 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9724 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9725
9726 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9727 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9728 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9729 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9730 {
9731 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9732 rcStrict = VINF_SUCCESS;
9733 }
9734 return rcStrict;
9735}
9736
9737
9738/**
9739 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9740 */
9741HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9742{
9743 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9744
9745 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9746 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9747 | CPUMCTX_EXTRN_HWVIRT
9748 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9749 AssertRCReturn(rc, rc);
9750
9751 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9752
9753 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9754 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9755 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9756 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9757 {
9758 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9759 rcStrict = VINF_SUCCESS;
9760 }
9761 return rcStrict;
9762}
9763
9764
9765/**
9766 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9767 */
9768HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9769{
9770 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9771
9772 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9773 | HMVMX_READ_EXIT_INSTR_INFO
9774 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9775 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9776 | CPUMCTX_EXTRN_SREG_MASK
9777 | CPUMCTX_EXTRN_HWVIRT
9778 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9779 AssertRCReturn(rc, rc);
9780
9781 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9782
9783 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9784 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9785
9786 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9787 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9788 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9789 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9790 {
9791 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9792 rcStrict = VINF_SUCCESS;
9793 }
9794 return rcStrict;
9795}
9796
9797
9798/**
9799 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9800 */
9801HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9802{
9803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9804
9805 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9806 | HMVMX_READ_EXIT_INSTR_INFO
9807 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9808 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9809 | CPUMCTX_EXTRN_SREG_MASK
9810 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9811 AssertRCReturn(rc, rc);
9812
9813 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9814
9815 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9816 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9817
9818 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9819 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9820 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9821 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9822 {
9823 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9824 rcStrict = VINF_SUCCESS;
9825 }
9826 return rcStrict;
9827}
9828
9829
9830# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9831/**
9832 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9833 */
9834HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9835{
9836 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9837
9838 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9839 | HMVMX_READ_EXIT_INSTR_INFO
9840 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9841 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9842 | CPUMCTX_EXTRN_SREG_MASK
9843 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9844 AssertRCReturn(rc, rc);
9845
9846 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9847
9848 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9849 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9850
9851 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9852 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9853 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9854 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9855 {
9856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9857 rcStrict = VINF_SUCCESS;
9858 }
9859 return rcStrict;
9860}
9861# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9862#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9863/** @} */
9864
9865
9866#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9867/** @name Nested-guest VM-exit handlers.
9868 * @{
9869 */
9870/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9871/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9872/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9873
9874/**
9875 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9876 * Conditional VM-exit.
9877 */
9878HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9879{
9880 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9881
9882 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9883
9884 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9885 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9886 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9887
9888 switch (uExitIntType)
9889 {
9890# ifndef IN_NEM_DARWIN
9891 /*
9892 * Physical NMIs:
 9893 * We should not direct host physical NMIs to the nested-guest; dispatch them to the host.
9894 */
9895 case VMX_EXIT_INT_INFO_TYPE_NMI:
9896 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9897# endif
9898
9899 /*
9900 * Hardware exceptions,
9901 * Software exceptions,
9902 * Privileged software exceptions:
9903 * Figure out if the exception must be delivered to the guest or the nested-guest.
9904 */
9905 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9906 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9907 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9908 {
9909 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9910 | HMVMX_READ_EXIT_INSTR_LEN
9911 | HMVMX_READ_IDT_VECTORING_INFO
9912 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9913
9914 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9915 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9916 {
9917 /* Exit qualification is required for debug and page-fault exceptions. */
9918 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9919
9920 /*
9921 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9922 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9923 * length. However, if delivery of a software interrupt, software exception or privileged
9924 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9925 */
9926 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9927 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9928 pVmxTransient->uExitIntErrorCode,
9929 pVmxTransient->uIdtVectoringInfo,
9930 pVmxTransient->uIdtVectoringErrorCode);
9931#ifdef DEBUG_ramshankar
9932 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9933 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9934 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9935 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9936 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9937 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9938#endif
9939 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9940 }
9941
9942 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9943 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9944 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9945 }
9946
9947 /*
9948 * Software interrupts:
9949 * VM-exits cannot be caused by software interrupts.
9950 *
9951 * External interrupts:
9952 * This should only happen when "acknowledge external interrupts on VM-exit"
9953 * control is set. However, we never set this when executing a guest or
9954 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9955 * the guest.
9956 */
9957 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9958 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9959 default:
9960 {
9961 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9962 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9963 }
9964 }
9965}
9966
9967
9968/**
9969 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9970 * Unconditional VM-exit.
9971 */
9972HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9973{
9974 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9975 return IEMExecVmxVmexitTripleFault(pVCpu);
9976}
9977
9978
9979/**
9980 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9981 */
9982HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9983{
9984 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9985
9986 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9987 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9988 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9989}
9990
9991
9992/**
9993 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9994 */
9995HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9996{
9997 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9998
9999 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10000 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10001    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10002}
10003
10004
10005/**
10006 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10007 * Unconditional VM-exit.
10008 */
10009HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10010{
10011 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10012
10013 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10014 | HMVMX_READ_EXIT_INSTR_LEN
10015 | HMVMX_READ_IDT_VECTORING_INFO
10016 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10017
10018 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10019 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10020 pVmxTransient->uIdtVectoringErrorCode);
10021 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10022}
10023
10024
10025/**
10026 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10027 */
10028HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10029{
10030 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10031
10032 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10033 {
10034 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10035 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10036 }
10037 return vmxHCExitHlt(pVCpu, pVmxTransient);
10038}
10039
10040
10041/**
10042 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10043 */
10044HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10045{
10046 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10047
10048 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10049 {
10050 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10051 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10052 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10053 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10054 }
10055 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10056}
10057
10058
10059/**
10060 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10061 */
10062HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10063{
10064 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10065
10066 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10067 {
10068 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10069 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10070 }
10071 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10072}
10073
10074
10075/**
10076 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10077 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10078 */
10079HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10080{
10081 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10082
10083 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10084 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10085
10086 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10087
10088 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10089 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10090 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10091
10092 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
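    /* Outside long mode the register operand is only 32 bits wide, so just its lower
       32 bits supply the VMCS field encoding. */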
10093 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10094 u64VmcsField &= UINT64_C(0xffffffff);
10095
10096 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10097 {
10098 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10099 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10100 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10101 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10102 }
10103
10104 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10105 return vmxHCExitVmread(pVCpu, pVmxTransient);
10106 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10107}
10108
10109
10110/**
10111 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10112 */
10113HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10114{
10115 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10116
10117 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10118 {
10119 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10120 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10121 }
10122
10123 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10124}
10125
10126
10127/**
10128 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10129 * Conditional VM-exit.
10130 */
10131HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10132{
10133 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10134
10135 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10136 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10137
10138 VBOXSTRICTRC rcStrict;
10139 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10140 switch (uAccessType)
10141 {
10142 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10143 {
10144 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10145 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10146 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10147 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10148
10149 bool fIntercept;
10150 switch (iCrReg)
10151 {
10152 case 0:
10153 case 4:
10154 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10155 break;
10156
10157 case 3:
10158 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10159 break;
10160
10161 case 8:
10162 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10163 break;
10164
10165 default:
10166 fIntercept = false;
10167 break;
10168 }
10169 if (fIntercept)
10170 {
10171 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10172 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10173 }
10174 else
10175 {
10176 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10177 AssertRCReturn(rc, rc);
10178 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10179 }
10180 break;
10181 }
10182
10183 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10184 {
10185 /*
10186 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10187 * CR2 reads do not cause a VM-exit.
10188 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10189 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10190 */
10191 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10192 if ( iCrReg == 3
10193 || iCrReg == 8)
10194 {
10195 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10196 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10197 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10198 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10199 {
10200 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10201 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10202 }
10203 else
10204 {
10205 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10206 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10207 }
10208 }
10209 else
10210 {
10211 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10212 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10213 }
10214 break;
10215 }
10216
10217 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10218 {
10219 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10220 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10221 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10222 if ( (uGstHostMask & X86_CR0_TS)
10223 && (uReadShadow & X86_CR0_TS))
10224 {
10225 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10226 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10227 }
10228 else
10229 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10230 break;
10231 }
10232
10233 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10234 {
10235 RTGCPTR GCPtrEffDst;
10236 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10237 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10238 if (fMemOperand)
10239 {
10240 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10241 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10242 }
10243 else
10244 GCPtrEffDst = NIL_RTGCPTR;
10245
10246 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10247 {
10248 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10249 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10250 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10251 }
10252 else
10253 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10254 break;
10255 }
10256
10257 default:
10258 {
10259 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10260 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10261 }
10262 }
10263
10264 if (rcStrict == VINF_IEM_RAISED_XCPT)
10265 {
10266 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10267 rcStrict = VINF_SUCCESS;
10268 }
10269 return rcStrict;
10270}
10271
10272
10273/**
10274 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10275 * Conditional VM-exit.
10276 */
10277HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10278{
10279 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10280
10281 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10282 {
10283 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10284 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10285 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10286 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10287 }
10288 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10289}
10290
10291
10292/**
10293 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10294 * Conditional VM-exit.
10295 */
10296HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10297{
10298 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10299
10300 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10301
10302 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10303 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
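    /* The I/O size field encodes 1, 2 and 4 byte accesses as 0, 1 and 3 respectively;
       2 is not a valid encoding, hence the assertion and the zero entry in s_aIOSizes below. */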
10304 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10305
10306 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10307 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10308 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10309 {
10310 /*
10311 * IN/OUT instruction:
10312 * - Provides VM-exit instruction length.
10313 *
10314 * INS/OUTS instruction:
10315 * - Provides VM-exit instruction length.
10316 * - Provides Guest-linear address.
10317 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10318 */
10319 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10320 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10321
10322    /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10323 pVmxTransient->ExitInstrInfo.u = 0;
10324 pVmxTransient->uGuestLinearAddr = 0;
10325
10326 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10327 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10328 if (fIOString)
10329 {
10330 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10331 if (fVmxInsOutsInfo)
10332 {
10333 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10334 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10335 }
10336 }
10337
10338 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10339 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10340 }
10341 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10342}
10343
10344
10345/**
10346 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10347 */
10348HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10349{
10350 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10351
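    /* Determine the effective read permission: when the nested-guest is not using MSR bitmaps,
       every RDMSR unconditionally causes a VM-exit, so treat it as an exit-on-read permission. */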
10352 uint32_t fMsrpm;
10353 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10354 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10355 else
10356 fMsrpm = VMXMSRPM_EXIT_RD;
10357
10358 if (fMsrpm & VMXMSRPM_EXIT_RD)
10359 {
10360 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10361 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10362 }
10363 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10364}
10365
10366
10367/**
10368 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10369 */
10370HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10371{
10372 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10373
10374 uint32_t fMsrpm;
10375 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10376 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10377 else
10378 fMsrpm = VMXMSRPM_EXIT_WR;
10379
10380 if (fMsrpm & VMXMSRPM_EXIT_WR)
10381 {
10382 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10383 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10384 }
10385 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10386}
10387
10388
10389/**
10390 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10391 */
10392HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10393{
10394 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10395
10396 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10397 {
10398 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10399 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10400 }
10401 return vmxHCExitMwait(pVCpu, pVmxTransient);
10402}
10403
10404
10405/**
10406 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10407 * VM-exit.
10408 */
10409HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10410{
10411 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10412
10413 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10414 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10415 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10416 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10417}
10418
10419
10420/**
10421 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10422 */
10423HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10424{
10425 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10426
10427 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10428 {
10429 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10430 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10431 }
10432 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10433}
10434
10435
10436/**
10437 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10438 */
10439HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10440{
10441 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10442
10443 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10444 * PAUSE when executing a nested-guest? If it does not, we would not need
10445 * to check for the intercepts here. Just call VM-exit... */
10446
10447 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10448 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10449 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10450 {
10451 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10452 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10453 }
10454 return vmxHCExitPause(pVCpu, pVmxTransient);
10455}
10456
10457
10458/**
10459 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10460 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10461 */
10462HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10463{
10464 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10465
10466 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10467 {
10468 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10469 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10470 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10471 }
10472 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10473}
10474
10475
10476/**
10477 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10478 * VM-exit.
10479 */
10480HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10481{
10482 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10483
10484 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10485 | HMVMX_READ_EXIT_INSTR_LEN
10486 | HMVMX_READ_IDT_VECTORING_INFO
10487 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10488
10489 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10490
10491 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10492 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10493
10494 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10495 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10496 pVmxTransient->uIdtVectoringErrorCode);
10497 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10498}
10499
10500
10501/**
10502 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10503 * Conditional VM-exit.
10504 */
10505HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10506{
10507 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10508
10509 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10510 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10511 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10512}
10513
10514
10515/**
10516 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10517 * Conditional VM-exit.
10518 */
10519HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10520{
10521 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10522
10523 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10524 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10525 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10526}
10527
10528
10529/**
10530 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10531 */
10532HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10533{
10534 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10535
10536 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10537 {
10538 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10539 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10540 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10541 }
10542 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10543}
10544
10545
10546/**
10547 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10548 */
10549HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10550{
10551 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10552
10553 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10554 {
10555 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10556 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10557 }
10558 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10559}
10560
10561
10562/**
10563 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10564 */
10565HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10566{
10567 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10568
10569 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10570 {
10571 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10572 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10573 | HMVMX_READ_EXIT_INSTR_INFO
10574 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10575 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10576 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10577 }
10578 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10579}
10580
10581
10582/**
10583 * Nested-guest VM-exit handler for invalid-guest state
10584 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10585 */
10586HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10587{
10588 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10589
10590 /*
10591 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10592     * So if it does happen, it likely indicates a bug in the hardware-assisted VMX code.
10593     * Handle it as if the outer guest were in an invalid guest state.
10594 *
10595 * When the fast path is implemented, this should be changed to cause the corresponding
10596 * nested-guest VM-exit.
10597 */
10598 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10599}
10600
10601
10602/**
10603 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10604 * and only provide the instruction length.
10605 *
10606 * Unconditional VM-exit.
10607 */
10608HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10609{
10610 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10611
10612#ifdef VBOX_STRICT
10613 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10614 switch (pVmxTransient->uExitReason)
10615 {
10616 case VMX_EXIT_ENCLS:
10617 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10618 break;
10619
10620 case VMX_EXIT_VMFUNC:
10621 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10622 break;
10623 }
10624#endif
10625
10626 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10627 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10628}
10629
10630
10631/**
10632 * Nested-guest VM-exit handler for instructions that provide instruction length as
10633 * well as more information.
10634 *
10635 * Unconditional VM-exit.
10636 */
10637HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10638{
10639 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10640
10641# ifdef VBOX_STRICT
10642 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10643 switch (pVmxTransient->uExitReason)
10644 {
10645 case VMX_EXIT_GDTR_IDTR_ACCESS:
10646 case VMX_EXIT_LDTR_TR_ACCESS:
10647 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10648 break;
10649
10650 case VMX_EXIT_RDRAND:
10651 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10652 break;
10653
10654 case VMX_EXIT_RDSEED:
10655 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10656 break;
10657
10658 case VMX_EXIT_XSAVES:
10659 case VMX_EXIT_XRSTORS:
10660 /** @todo NSTVMX: Verify XSS-bitmap. */
10661 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10662 break;
10663
10664 case VMX_EXIT_UMWAIT:
10665 case VMX_EXIT_TPAUSE:
10666 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10667 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10668 break;
10669
10670 case VMX_EXIT_LOADIWKEY:
10671 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10672 break;
10673 }
10674# endif
10675
10676 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10677 | HMVMX_READ_EXIT_INSTR_LEN
10678 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10679 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10680 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10681}
10682
10683# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10684
10685/**
10686 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10687 * Conditional VM-exit.
10688 */
10689HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10690{
10691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10692 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10693
10694 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10695 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10696 {
10697 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10698 | HMVMX_READ_EXIT_INSTR_LEN
10699 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10700 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10701 | HMVMX_READ_IDT_VECTORING_INFO
10702 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10703 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10704 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10705 AssertRCReturn(rc, rc);
10706
10707 /*
10708         * If this is our VM-exit, we are responsible for re-injecting any event whose delivery
10709         * might have triggered it. If we forward the problem to the inner VMM, it becomes the
10710         * inner VMM's responsibility to deal with the event, and we clear the recovered event.
10711 */
10712 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10713 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10714 { /*likely*/ }
10715 else
10716 {
10717 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10718 return rcStrict;
10719 }
10720 bool const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10721
10722 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10723 uint64_t const uExitQual = pVmxTransient->uExitQual;
10724
10725 RTGCPTR GCPtrNestedFault;
10726 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10727 if (fIsLinearAddrValid)
10728 {
10729 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10730 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10731 }
10732 else
10733 GCPtrNestedFault = 0;
10734
10735 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10736 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10737 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10738 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10739 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
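        /* For example: a guest write that hit a present but read-only guest-physical mapping has
           VMX_EXIT_QUAL_EPT_ACCESS_WRITE and VMX_EXIT_QUAL_EPT_ENTRY_READ set in the qualification,
           yielding uErr = X86_TRAP_PF_RW | X86_TRAP_PF_P above. */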
10740
10741 PGMPTWALK Walk;
10742 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10743 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10744 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10745 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10746 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10747 if (RT_SUCCESS(rcStrict))
10748 return rcStrict;
10749
10750 if (fClearEventOnForward)
10751 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10752
10753 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10754 pVmxTransient->uIdtVectoringErrorCode);
10755 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10756 {
10757 VMXVEXITINFO const ExitInfo
10758 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10759 pVmxTransient->uExitQual,
10760 pVmxTransient->cbExitInstr,
10761 pVmxTransient->uGuestLinearAddr,
10762 pVmxTransient->uGuestPhysicalAddr);
10763 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10764 }
10765
10766 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10767 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10768 }
10769
10770 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10771}
10772
10773
10774/**
10775 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10776 * Conditional VM-exit.
10777 */
10778HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10779{
10780 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10781 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10782
10783 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10784 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10785 {
10786 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10787 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10788 AssertRCReturn(rc, rc);
10789
10790 PGMPTWALK Walk;
10791 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10792 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10793 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10794 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10795 0 /* GCPtrNestedFault */, &Walk);
10796 if (RT_SUCCESS(rcStrict))
10797 {
10798 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10799 return rcStrict;
10800 }
10801
10802 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10803 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10804 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10805
10806 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10807 pVmxTransient->uIdtVectoringErrorCode);
10808 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10809 }
10810
10811 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10812}
10813
10814# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10815
10816/** @} */
10817#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10818
10819
10820/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10821 * probes.
10822 *
10823 * The following few functions and the associated structure contain the bloat
10824 * necessary for providing detailed debug events and dtrace probes as well as
10825 * reliable host side single stepping. This works on the principle of
10826 * "subclassing" the normal execution loop and workers. We replace the loop
10827 * method completely and override selected helpers to add necessary adjustments
10828 * to their core operation.
10829 *
10830 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10831 * any performance for debug and analysis features.
10832 *
10833 * @{
10834 */
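/*
 * Rough usage sketch (inferred from the helper descriptions below, not a verbatim copy of the
 * debug run loop): the loop initializes a VMXRUNDBGSTATE, folds in the current DBGF/DTrace
 * settings, applies them to the VMCS right before each VM-entry and reverts everything when
 * leaving the loop:
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState); // re-check DBGF/DTrace settings
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);  // just before VM-entry
 *         ... execute the guest and handle the VM-exit ...
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */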
10835
10836/**
10837 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10838 * in the debug run loop.
10839 */
10840typedef struct VMXRUNDBGSTATE
10841{
10842 /** The RIP we started executing at. This is for detecting that we stepped. */
10843 uint64_t uRipStart;
10844 /** The CS we started executing with. */
10845 uint16_t uCsStart;
10846
10847 /** Whether we've actually modified the 1st execution control field. */
10848 bool fModifiedProcCtls : 1;
10849 /** Whether we've actually modified the 2nd execution control field. */
10850 bool fModifiedProcCtls2 : 1;
10851 /** Whether we've actually modified the exception bitmap. */
10852 bool fModifiedXcptBitmap : 1;
10853
10854    /** Whether we want the modified CR0 mask to be cleared. */
10855 bool fClearCr0Mask : 1;
10856    /** Whether we want the modified CR4 mask to be cleared. */
10857 bool fClearCr4Mask : 1;
10858 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10859 uint32_t fCpe1Extra;
10860 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10861 uint32_t fCpe1Unwanted;
10862 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10863 uint32_t fCpe2Extra;
10864 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10865 uint32_t bmXcptExtra;
10866 /** The sequence number of the Dtrace provider settings the state was
10867 * configured against. */
10868 uint32_t uDtraceSettingsSeqNo;
10869 /** VM-exits to check (one bit per VM-exit). */
10870 uint32_t bmExitsToCheck[3];
10871
10872 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10873 uint32_t fProcCtlsInitial;
10874 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10875 uint32_t fProcCtls2Initial;
10876 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10877 uint32_t bmXcptInitial;
10878} VMXRUNDBGSTATE;
10879AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10880typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10881
10882
10883/**
10884 * Initializes the VMXRUNDBGSTATE structure.
10885 *
10886 * @param pVCpu The cross context virtual CPU structure of the
10887 * calling EMT.
10888 * @param pVmxTransient The VMX-transient structure.
10889 * @param pDbgState The debug state to initialize.
10890 */
10891static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10892{
10893 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10894 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10895
10896 pDbgState->fModifiedProcCtls = false;
10897 pDbgState->fModifiedProcCtls2 = false;
10898 pDbgState->fModifiedXcptBitmap = false;
10899 pDbgState->fClearCr0Mask = false;
10900 pDbgState->fClearCr4Mask = false;
10901 pDbgState->fCpe1Extra = 0;
10902 pDbgState->fCpe1Unwanted = 0;
10903 pDbgState->fCpe2Extra = 0;
10904 pDbgState->bmXcptExtra = 0;
10905 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10906 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10907 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10908}
10909
10910
10911/**
10912 * Updates the VMCS fields with the changes requested by @a pDbgState.
10913 *
10914 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10915 * immediately before executing guest code, i.e. when interrupts are disabled.
10916 * We don't check status codes here as we cannot easily assert or return in the
10917 * latter case.
10918 *
10919 * @param pVCpu The cross context virtual CPU structure.
10920 * @param pVmxTransient The VMX-transient structure.
10921 * @param pDbgState The debug state.
10922 */
10923static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10924{
10925 /*
10926 * Ensure desired flags in VMCS control fields are set.
10927 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10928 *
10929 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10930 * there should be no stale data in pCtx at this point.
10931 */
10932 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10933 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10934 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10935 {
10936 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10937 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10938 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10939 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10940 pDbgState->fModifiedProcCtls = true;
10941 }
10942
10943 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10944 {
10945 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10946 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10947 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10948 pDbgState->fModifiedProcCtls2 = true;
10949 }
10950
10951 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10952 {
10953 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10954 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10955 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10956 pDbgState->fModifiedXcptBitmap = true;
10957 }
10958
10959 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10960 {
10961 pVmcsInfo->u64Cr0Mask = 0;
10962 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10963 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10964 }
10965
10966 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10967 {
10968 pVmcsInfo->u64Cr4Mask = 0;
10969 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10970 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10971 }
10972
10973 NOREF(pVCpu);
10974}
10975
10976
10977/**
10978 * Restores the VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10979 * re-entry the next time around.
10980 *
10981 * @returns Strict VBox status code (i.e. informational status codes too).
10982 * @param pVCpu The cross context virtual CPU structure.
10983 * @param pVmxTransient The VMX-transient structure.
10984 * @param pDbgState The debug state.
10985 * @param rcStrict The return code from executing the guest using single
10986 * stepping.
10987 */
10988static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10989 VBOXSTRICTRC rcStrict)
10990{
10991 /*
10992 * Restore VM-exit control settings as we may not reenter this function the
10993 * next time around.
10994 */
10995 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10996
10997    /* We reload the initial value and trigger what recalculations we can the next
10998       time around. From the looks of things, that's all that's required at the moment. */
10999 if (pDbgState->fModifiedProcCtls)
11000 {
11001 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11002 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11003 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11004 AssertRC(rc2);
11005 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11006 }
11007
11008 /* We're currently the only ones messing with this one, so just restore the
11009 cached value and reload the field. */
11010 if ( pDbgState->fModifiedProcCtls2
11011 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11012 {
11013 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11014 AssertRC(rc2);
11015 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11016 }
11017
11018 /* If we've modified the exception bitmap, we restore it and trigger
11019 reloading and partial recalculation the next time around. */
11020 if (pDbgState->fModifiedXcptBitmap)
11021 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11022
11023 return rcStrict;
11024}
11025
11026
11027/**
11028 * Configures VM-exit controls for current DBGF and DTrace settings.
11029 *
11030 * This updates @a pDbgState and the VMCS execution control fields to reflect
11031 * the necessary VM-exits demanded by DBGF and DTrace.
11032 *
11033 * @param pVCpu The cross context virtual CPU structure.
11034 * @param pVmxTransient The VMX-transient structure. May update
11035 * fUpdatedTscOffsettingAndPreemptTimer.
11036 * @param pDbgState The debug state.
11037 */
11038static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11039{
11040#ifndef IN_NEM_DARWIN
11041 /*
11042 * Take down the dtrace serial number so we can spot changes.
11043 */
11044 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11045 ASMCompilerBarrier();
11046#endif
11047
11048 /*
11049 * We'll rebuild most of the middle block of data members (holding the
11050 * current settings) as we go along here, so start by clearing it all.
11051 */
11052 pDbgState->bmXcptExtra = 0;
11053 pDbgState->fCpe1Extra = 0;
11054 pDbgState->fCpe1Unwanted = 0;
11055 pDbgState->fCpe2Extra = 0;
11056 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11057 pDbgState->bmExitsToCheck[i] = 0;
11058
11059 /*
11060 * Software interrupts (INT XXh) - no idea how to trigger these...
11061 */
11062 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11063 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11064 || VBOXVMM_INT_SOFTWARE_ENABLED())
11065 {
11066 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11067 }
11068
11069 /*
11070 * INT3 breakpoints - triggered by #BP exceptions.
11071 */
11072 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11073 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11074
11075 /*
11076 * Exception bitmap and XCPT events+probes.
11077 */
11078 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11079 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11080 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11081
11082 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11083 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11084 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11085 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11086 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11087 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11088 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11089 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11090 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11091 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11092 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11093 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11094 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11095 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11096 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11097 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11098 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11099 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11100
11101 if (pDbgState->bmXcptExtra)
11102 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11103
11104 /*
11105 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11106 *
11107 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11108 * So, when adding/changing/removing please don't forget to update it.
11109 *
11110     * Some of the macros pick up local variables to save horizontal space
11111     * (being able to see it laid out as a table is the lesser evil here).
11112 */
11113#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11114 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11115 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11116#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11117 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11118 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11119 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11120 } else do { } while (0)
11121#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11122 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11123 { \
11124 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11125 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11126 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11127 } else do { } while (0)
11128#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11129 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11130 { \
11131 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11132 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11133 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11134 } else do { } while (0)
11135#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11136 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11137 { \
11138 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11139 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11140 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11141 } else do { } while (0)
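/* For illustration, SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
   below expands to roughly this (compile-time assertion omitted):

       if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
           || VBOXVMM_INSTR_HALT_ENABLED())
       {
           pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
           ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
       }
 */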
11142
11143 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11144 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11145 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11146 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11147 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11148
11149 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11150 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11151 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11152 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11153 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11154 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11155 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11156 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11157 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11158 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11159 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11160 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11161 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11162 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11163 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11164 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11165 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11166 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11167 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11168 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11169 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11170 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11171 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11172 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11173 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11174 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11175 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11176 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11177 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11178 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11179 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11180 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11181 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11182 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11183 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11184 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11185
11186 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11187 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11188 {
11189 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11190 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11191 AssertRC(rc);
11192
11193#if 0 /** @todo fix me */
11194 pDbgState->fClearCr0Mask = true;
11195 pDbgState->fClearCr4Mask = true;
11196#endif
11197 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11198 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11199 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11200 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11201 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11202 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11203 require clearing here and in the loop if we start using it. */
11204 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11205 }
11206 else
11207 {
11208 if (pDbgState->fClearCr0Mask)
11209 {
11210 pDbgState->fClearCr0Mask = false;
11211 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11212 }
11213 if (pDbgState->fClearCr4Mask)
11214 {
11215 pDbgState->fClearCr4Mask = false;
11216 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11217 }
11218 }
11219 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11220 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11221
11222 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11223 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11224 {
11225 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11226 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11227 }
11228 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11229 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11230
11231 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11232 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11233 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11234 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11235 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11236 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11237 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11238 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11239#if 0 /** @todo too slow, fix handler. */
11240 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11241#endif
11242 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11243
11244 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11245 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11246 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11247 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11248 {
11249 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11250 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11251 }
11252 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11253 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11254 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11255 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11256
11257 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11258 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11259 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11260 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11261 {
11262 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11263 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11264 }
11265 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11266 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11267 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11268 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11269
11270 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11271 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11272 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11273 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11274 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11275 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11276 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11278 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11280 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11281 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11282 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11283 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11284 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11285 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11286 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11287 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11288 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11289 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11290 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11292
11293#undef IS_EITHER_ENABLED
11294#undef SET_ONLY_XBM_IF_EITHER_EN
11295#undef SET_CPE1_XBM_IF_EITHER_EN
11296#undef SET_CPEU_XBM_IF_EITHER_EN
11297#undef SET_CPE2_XBM_IF_EITHER_EN
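
/* The SET_*_XBM_IF_EITHER_EN macro bodies are defined earlier in this file and
   are not repeated here.  As a rough, hypothetical sketch of the pattern
   (illustration only, not the real definition), the CPE1 variant is assumed to
   behave along these lines:

       #define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrl) \
           do { \
               if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
               { \
                   ASMBitSet(pDbgState->bmExitsToCheck, (a_uExit)); \
                   pDbgState->fCpe1Extra |= (a_fCtrl); \
               } \
           } while (0)

   i.e. when either the DBGF event or the matching dtrace probe is enabled, the
   VM-exit is marked for checking and the proc-ctls bit needed to trigger it is
   requested, mirroring what the open-coded CRX/DRX/descriptor-table cases above
   do by hand. */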
11298
11299 /*
11300      * Sanitize the requested execution controls against what the CPU supports.
11301 */
11302 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11303 if (pDbgState->fCpe2Extra)
11304 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11305 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11306 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
11307#ifndef IN_NEM_DARWIN
11308 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11309 {
11310 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11311 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11312 }
11313#else
11314 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11315 {
11316 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11317 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11318 }
11319#endif
11320
11321 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11322 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11323 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11324 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11325}
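
/* A minimal standalone sketch (hypothetical helper, illustration only) of the
   allowed-0/allowed-1 rule the sanitizing code above relies on: a control bit
   may only be 1 if it is set in the allowed-1 mask, and must be 1 if it is set
   in the allowed-0 mask of the corresponding VMX capability MSR. */
#if 0
static uint32_t vmxExampleSanitizeCtls(uint32_t fDesired, uint32_t fAllowed0, uint32_t fAllowed1)
{
    uint32_t fCtls = fDesired & fAllowed1;   /* drop bits the CPU cannot set to 1 */
    fCtls |= fAllowed0;                      /* force the bits the CPU requires to be 1 */
    return fCtls;
}
#endif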
11326
11327
11328/**
11329 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11330 * appropriate.
11331 *
11332  * The caller has already checked the VM-exit against the
11333  * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
11334  * so we don't have to do either of those here.
11335 *
11336 * @returns Strict VBox status code (i.e. informational status codes too).
11337 * @param pVCpu The cross context virtual CPU structure.
11338 * @param pVmxTransient The VMX-transient structure.
11339 * @param uExitReason The VM-exit reason.
11340 *
11341 * @remarks The name of this function is displayed by dtrace, so keep it short
11342  *          and to the point. No longer than 33 chars, please.
11343 */
11344static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11345{
11346 /*
11347 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11348 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11349 *
11350 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11351      *       does. Additions, changes and removals must be made in both places. Same ordering, please.
11352 *
11353 * Added/removed events must also be reflected in the next section
11354 * where we dispatch dtrace events.
11355 */
11356 bool fDtrace1 = false;
11357 bool fDtrace2 = false;
11358 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11359 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11360 uint32_t uEventArg = 0;
11361#define SET_EXIT(a_EventSubName) \
11362 do { \
11363 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11364 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11365 } while (0)
11366#define SET_BOTH(a_EventSubName) \
11367 do { \
11368 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11369 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11370 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11371 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11372 } while (0)
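/* For example, SET_BOTH(CPUID) in the switch below expands (via RT_CONCAT and
   RT_CONCAT3) to roughly the following:

       do {
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       } while (0)
*/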
11373 switch (uExitReason)
11374 {
11375 case VMX_EXIT_MTF:
11376 return vmxHCExitMtf(pVCpu, pVmxTransient);
11377
11378 case VMX_EXIT_XCPT_OR_NMI:
11379 {
11380 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11381 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11382 {
11383 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11384 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11385 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11386 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11387 {
11388 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11389 {
11390 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11391 uEventArg = pVmxTransient->uExitIntErrorCode;
11392 }
11393 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11394 switch (enmEvent1)
11395 {
11396 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11397 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11398 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11399 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11400 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11401 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11402 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11403 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11404 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11405 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11406 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11407 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11408 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11409 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11410 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11411 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11412 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11413 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11414 default: break;
11415 }
11416 }
11417 else
11418 AssertFailed();
11419 break;
11420
11421 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11422 uEventArg = idxVector;
11423 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11424 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11425 break;
11426 }
11427 break;
11428 }
11429
11430 case VMX_EXIT_TRIPLE_FAULT:
11431 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11432 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11433 break;
11434 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11435 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11436 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11437 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11438 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11439
11440 /* Instruction specific VM-exits: */
11441 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11442 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11443 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11444 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11445 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11446 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11447 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11448 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11449 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11450 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11451 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11452 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11453 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11454 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11455 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11456 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11457 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11458 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11459 case VMX_EXIT_MOV_CRX:
11460 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11461 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11462 SET_BOTH(CRX_READ);
11463 else
11464 SET_BOTH(CRX_WRITE);
11465 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11466 break;
11467 case VMX_EXIT_MOV_DRX:
11468 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11469 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11470 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11471 SET_BOTH(DRX_READ);
11472 else
11473 SET_BOTH(DRX_WRITE);
11474 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11475 break;
11476 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11477 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11478 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11479 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11480 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11481 case VMX_EXIT_GDTR_IDTR_ACCESS:
11482 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11483 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11484 {
11485 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11486 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11487 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11488 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11489 }
11490 break;
11491
11492 case VMX_EXIT_LDTR_TR_ACCESS:
11493 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11494 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11495 {
11496 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11497 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11498 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11499 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11500 }
11501 break;
11502
11503 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11504 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11505 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11506 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11507 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11508 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11509 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11510 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11511 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11512 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11513 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11514
11515 /* Events that aren't relevant at this point. */
11516 case VMX_EXIT_EXT_INT:
11517 case VMX_EXIT_INT_WINDOW:
11518 case VMX_EXIT_NMI_WINDOW:
11519 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11520 case VMX_EXIT_PREEMPT_TIMER:
11521 case VMX_EXIT_IO_INSTR:
11522 break;
11523
11524 /* Errors and unexpected events. */
11525 case VMX_EXIT_INIT_SIGNAL:
11526 case VMX_EXIT_SIPI:
11527 case VMX_EXIT_IO_SMI:
11528 case VMX_EXIT_SMI:
11529 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11530 case VMX_EXIT_ERR_MSR_LOAD:
11531 case VMX_EXIT_ERR_MACHINE_CHECK:
11532 case VMX_EXIT_PML_FULL:
11533 case VMX_EXIT_VIRTUALIZED_EOI:
11534 break;
11535
11536 default:
11537 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11538 break;
11539 }
11540#undef SET_BOTH
11541#undef SET_EXIT
11542
11543 /*
11544      * Dtrace tracepoints go first. We do them all here at once so we don't
11545      * have to repeat the guest-state saving and related code a few dozen times.
11546      * The downside is that we have to repeat the switch, though this time
11547      * we use enmEvent since the probes are a subset of what DBGF does.
11548 */
11549 if (fDtrace1 || fDtrace2)
11550 {
11551 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11552 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11553 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11554 switch (enmEvent1)
11555 {
11556 /** @todo consider which extra parameters would be helpful for each probe. */
11557 case DBGFEVENT_END: break;
11558 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11559 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11560 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11561 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11562 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11563 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11564 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11565 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11566 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11567 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11568 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11569 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11570 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11571 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11572 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11573 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11574 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11575 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11576 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11577 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11578 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11579 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11580 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11581 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11582 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11583 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11584 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11585 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11586 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11587 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11588 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11589 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11590 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11591 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11592 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11593 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11594 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11595 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11596 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11597 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11598 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11599 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11600 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11601 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11602 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11603 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11604 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11605 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11606 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11607 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11608 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11609 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11610 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11611 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11612 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11613 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11614 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11615 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11616 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11617 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11618 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11619 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11620 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11621 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11622 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11623 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11624 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11625 }
11626 switch (enmEvent2)
11627 {
11628 /** @todo consider which extra parameters would be helpful for each probe. */
11629 case DBGFEVENT_END: break;
11630 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11631 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11632 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11633 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11634 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11635 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11636 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11637 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11638 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11639 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11640 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11641 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11642 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11643 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11644 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11645 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11646 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11647 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11648 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11649 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11650 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11651 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11652 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11653 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11654 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11655 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11656 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11657 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11658 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11659 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11660 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11661 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11662 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11663 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11664 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11665 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11666 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11667 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11668 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11669 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11670 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11671 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11672 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11673 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11674 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11675 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11676 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11677 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11678 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11679 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11680 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11681 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11682 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11683 }
11684 }
11685
11686 /*
11687      * Fire off the DBGF event, if enabled (our check here is just a quick one;
11688      * the DBGF call will do a full check).
11689 *
11690 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11691      * Note! If we have two events, we prioritize the first, i.e. the instruction
11692 * one, in order to avoid event nesting.
11693 */
11694 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11695 if ( enmEvent1 != DBGFEVENT_END
11696 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11697 {
11698 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11699 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11700 if (rcStrict != VINF_SUCCESS)
11701 return rcStrict;
11702 }
11703 else if ( enmEvent2 != DBGFEVENT_END
11704 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11705 {
11706 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11707 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11708 if (rcStrict != VINF_SUCCESS)
11709 return rcStrict;
11710 }
11711
11712 return VINF_SUCCESS;
11713}
11714
11715
11716/**
11717 * Single-stepping VM-exit filtering.
11718 *
11719  * This preprocesses the VM-exits and decides whether we've gotten far
11720  * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11721 * handling is performed.
11722 *
11723 * @returns Strict VBox status code (i.e. informational status codes too).
11724 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11725 * @param pVmxTransient The VMX-transient structure.
11726 * @param pDbgState The debug state.
11727 */
11728DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11729{
11730 /*
11731 * Expensive (saves context) generic dtrace VM-exit probe.
11732 */
11733 uint32_t const uExitReason = pVmxTransient->uExitReason;
11734 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11735 { /* more likely */ }
11736 else
11737 {
11738 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11739 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11740 AssertRC(rc);
11741 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11742 }
11743
11744#ifndef IN_NEM_DARWIN
11745 /*
11746 * Check for host NMI, just to get that out of the way.
11747 */
11748 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11749 { /* normally likely */ }
11750 else
11751 {
11752 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11753 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11754 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11755 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11756 }
11757#endif
11758
11759 /*
11760 * Check for single stepping event if we're stepping.
11761 */
11762 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11763 {
11764 switch (uExitReason)
11765 {
11766 case VMX_EXIT_MTF:
11767 return vmxHCExitMtf(pVCpu, pVmxTransient);
11768
11769 /* Various events: */
11770 case VMX_EXIT_XCPT_OR_NMI:
11771 case VMX_EXIT_EXT_INT:
11772 case VMX_EXIT_TRIPLE_FAULT:
11773 case VMX_EXIT_INT_WINDOW:
11774 case VMX_EXIT_NMI_WINDOW:
11775 case VMX_EXIT_TASK_SWITCH:
11776 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11777 case VMX_EXIT_APIC_ACCESS:
11778 case VMX_EXIT_EPT_VIOLATION:
11779 case VMX_EXIT_EPT_MISCONFIG:
11780 case VMX_EXIT_PREEMPT_TIMER:
11781
11782 /* Instruction specific VM-exits: */
11783 case VMX_EXIT_CPUID:
11784 case VMX_EXIT_GETSEC:
11785 case VMX_EXIT_HLT:
11786 case VMX_EXIT_INVD:
11787 case VMX_EXIT_INVLPG:
11788 case VMX_EXIT_RDPMC:
11789 case VMX_EXIT_RDTSC:
11790 case VMX_EXIT_RSM:
11791 case VMX_EXIT_VMCALL:
11792 case VMX_EXIT_VMCLEAR:
11793 case VMX_EXIT_VMLAUNCH:
11794 case VMX_EXIT_VMPTRLD:
11795 case VMX_EXIT_VMPTRST:
11796 case VMX_EXIT_VMREAD:
11797 case VMX_EXIT_VMRESUME:
11798 case VMX_EXIT_VMWRITE:
11799 case VMX_EXIT_VMXOFF:
11800 case VMX_EXIT_VMXON:
11801 case VMX_EXIT_MOV_CRX:
11802 case VMX_EXIT_MOV_DRX:
11803 case VMX_EXIT_IO_INSTR:
11804 case VMX_EXIT_RDMSR:
11805 case VMX_EXIT_WRMSR:
11806 case VMX_EXIT_MWAIT:
11807 case VMX_EXIT_MONITOR:
11808 case VMX_EXIT_PAUSE:
11809 case VMX_EXIT_GDTR_IDTR_ACCESS:
11810 case VMX_EXIT_LDTR_TR_ACCESS:
11811 case VMX_EXIT_INVEPT:
11812 case VMX_EXIT_RDTSCP:
11813 case VMX_EXIT_INVVPID:
11814 case VMX_EXIT_WBINVD:
11815 case VMX_EXIT_XSETBV:
11816 case VMX_EXIT_RDRAND:
11817 case VMX_EXIT_INVPCID:
11818 case VMX_EXIT_VMFUNC:
11819 case VMX_EXIT_RDSEED:
11820 case VMX_EXIT_XSAVES:
11821 case VMX_EXIT_XRSTORS:
11822 {
11823 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11824 AssertRCReturn(rc, rc);
11825 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11826 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11827 return VINF_EM_DBG_STEPPED;
11828 break;
11829 }
11830
11831 /* Errors and unexpected events: */
11832 case VMX_EXIT_INIT_SIGNAL:
11833 case VMX_EXIT_SIPI:
11834 case VMX_EXIT_IO_SMI:
11835 case VMX_EXIT_SMI:
11836 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11837 case VMX_EXIT_ERR_MSR_LOAD:
11838 case VMX_EXIT_ERR_MACHINE_CHECK:
11839 case VMX_EXIT_PML_FULL:
11840 case VMX_EXIT_VIRTUALIZED_EOI:
11841             case VMX_EXIT_APIC_WRITE: /* There is some talk about this being fault-like, so presumably we must process it. */
11842 break;
11843
11844 default:
11845 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11846 break;
11847 }
11848 }
11849
11850 /*
11851 * Check for debugger event breakpoints and dtrace probes.
11852 */
11853 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11854 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11855 {
11856 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11857 if (rcStrict != VINF_SUCCESS)
11858 return rcStrict;
11859 }
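
/* Rough sketch of the bit addressing done by ASMBitTest above, assuming the
   bmExitsToCheck bitmap is an array of 32-bit words (illustration only):

       fHit = (pDbgState->bmExitsToCheck[uExitReason / 32] >> (uExitReason % 32)) & 1;
*/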
11860
11861 /*
11862 * Normal processing.
11863 */
11864#ifdef HMVMX_USE_FUNCTION_TABLE
11865 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11866#else
11867 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11868#endif
11869}
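
/* The HMVMX_USE_FUNCTION_TABLE path above dispatches through a table indexed by
   the VM-exit reason.  A minimal, hypothetical sketch of that pattern (the real
   g_aVMExitHandlers table and its entry type are defined elsewhere in this file
   and may differ):

       typedef VBOXSTRICTRC FNEXITHANDLEREXAMPLE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
       typedef struct { FNEXITHANDLEREXAMPLE *pfn; } EXITHANDLERENTRYEXAMPLE;

       static const EXITHANDLERENTRYEXAMPLE g_aExampleHandlers[] =
       {
           // ... one entry per VM-exit reason, e.g. vmxHCExitMtf at index VMX_EXIT_MTF ...
       };

       return g_aExampleHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
*/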
11870
11871/** @} */