VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 99327

Last change on this file since 99327 was 99259, checked in by vboxsync, 21 months ago

VMM/VMXAllTemplate.cpp.h: Nested VMX: bugref:10318 Assert VT-x's guarantee that blocking-by-STI and Mov-SS are never both set simultaneously.

1/* $Id: VMXAllTemplate.cpp.h 99259 2023-04-03 12:15:37Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
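/*
 * A minimal usage sketch (hypothetical VM-exit handler body, not taken from this file):
 * HMVMX_ASSERT_READ is paired with vmxHCReadToTransient<>() so that strict builds catch
 * handlers that touch transient fields they never requested.
 *
 *     vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *     uint64_t const uExitQual = pVmxTransient->uExitQual; /* now known to be valid */
 */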
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
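/*
 * A minimal sketch of how this mask is used: before operations that need the complete
 * guest-CPU state in CPUMCTX (e.g. switching VMCSes further below), the code asserts
 * that none of these bits are still marked as external:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 */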
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only data fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields. */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
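/*
 * A minimal sketch, assuming the shadow-VMCS setup simply walks this table (the real
 * setup code additionally filters out fields the host CPU does not support and keeps
 * read-only fields in a separate array; paShadowVmcsFields is a hypothetical destination):
 *
 *     for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFields); i++)
 *         paShadowVmcsFields[i] = g_aVmcsFields[i];
 */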
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
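/*
 * A minimal dispatch sketch (hypothetical caller, assuming uExitReason has already been
 * read from the VMCS and validated): the exit reason indexes directly into
 * g_aVMExitHandlers and the handler is invoked through the stored function pointer.
 *
 *     Assert(pVmxTransient->uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 */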
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
710
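/*
 * A minimal sketch of the intended lookup (hypothetical logging site; uInstrError is
 * assumed to hold the VM-instruction error read from the VMCS): the error number is
 * clamped against HMVMX_INSTR_ERROR_MAX before indexing the table.
 *
 *     uint32_t const idxError = RT_MIN(uInstrError, HMVMX_INSTR_ERROR_MAX);
 *     Log(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxError]));
 */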
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
743
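/*
 * A minimal sketch of how such a mask is committed to the VMCS (assuming a
 * VMX_VMCS_WRITE_NW wrapper matching the VMX_VMCS_READ_NW used elsewhere in this file;
 * the actual export code is not shown in this excerpt):
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */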
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because the behavior
769 * depends on CR3 which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
839
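/*
 * A minimal usage sketch (hypothetical call site): a single exception is intercepted
 * through the RT_BIT_32 wrapper above, while several exceptions can be added with one
 * VMCS write via the mask variant.
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(X86_XCPT_NM) | RT_BIT_32(X86_XCPT_MF));
 */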
840
841/**
842 * Removes one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of the fields in @a a_fReadMask have
1233 * been read yet, generating an optimized read sequence without any conditionals
1234 * in non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
1319
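/*
 * A minimal usage sketch (hypothetical exit handler): the fields to read are passed as
 * a template argument, so the fast path compiles down to a fixed sequence of VMREADs
 * with no runtime conditionals.
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 */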
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
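 /* Illustration of the allowed0/allowed1 convention (made-up capability values, not from any real CPU):
      allowed0 = 0x000011ff -> these bits must remain 1, so fVal starts out with them;
      allowed1 = 0x0003ffff -> only these bits may be 1, anything outside must stay 0 (fZap).
    After OR-ing in the features we need, the "(fVal & fZap) == fVal" check further down verifies that
    we did not request a bit the CPU does not allow to be set. */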
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set the "save debug controls" as the converse
1548 * "load debug controls" is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
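 /* Illustrative example (made-up values): pending vector 0x51 -> priority class 5; guest TPR 0x80 ->
    class 8. Since 5 <= 8 the threshold becomes 5, so a TPR-below-threshold VM-exit fires once the
    guest lowers its TPR below 0x50, at which point we can deliver the pending interrupt. */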
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1728 }
1729 }
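 /* Example of the above: a guest executing STI with EFLAGS.IF previously clear starts an interrupt
    shadow covering the very next instruction; we report it as blocking-by-STI here so VT-x will not
    inject an external interrupt until that instruction has retired. */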
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet.*/
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1812 to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * calling the import VMCS code which is currently performing the guest MSR reads
1868 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1869 * and the rest of the VMX leave session machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2020 * sharing FPU-related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
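 /* Illustration (typical but not guaranteed MSR values): IA32_VMX_CR0_FIXED0 is commonly 0x80000021,
    i.e. PG, NE and PE must be 1, while IA32_VMX_CR0_FIXED1 is 0xffffffff, i.e. no bit is forced to 0.
    With unrestricted guest execution, PE and PG are dropped from the must-be-set mask above so the
    guest can run unpaged real-mode/protected-mode code. */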
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2084 * using CR0.TS.
2085 *
2086 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2087 * set on the first CPUs to support VT-x and makes no mention of it with regards to UX in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging, define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
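 /* Illustrative example (hypothetical EPT PML4 address): for HCPhysEPTP = 0x0000000012345000 the
    result is 0x000000001234501e, i.e. bits 2:0 = 6 (write-back memory type) and bits 5:3 = 3
    (page-walk length of 4, encoded as length - 1). */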
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
2249 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
2272
2273 /*
2274 * The guest's view of its CR3 is unblemished with nested paging when the
2275 * guest is using paging or we have unrestricted guest execution to handle
2276 * the guest when it's not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4.*/
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386 * The shadow paging mode and the guest paging mode can differ; the shadow is in accordance with the host
2387 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2466 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467 * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u2Dpl));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
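 /* Reminder of the real/V86-mode invariants asserted below: the base equals the selector shifted
    left by 4 (illustrative example: selector 0x1234 implies base 0x12340), the limit is 0xffff and
    the access rights are 0xf3. */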
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650 /*
2651 * The way to tell whether this is really a null selector or just a selector that was
2652 * loaded with 0 in real-mode is by looking at the segment attributes. A selector
2653 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2654 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
2655 * NULL selectors loaded in protected-mode have their attributes set to 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
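/* Worked example (reads straight off the switch above, included for clarity): an IDT-vectoring
   type of VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT with uVector == X86_XCPT_BP (INT3) yields
   IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR, which is the shape of flags
   IEMEvaluateRecursiveXcpt expects when classifying the original event. */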
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
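/* Illustrative sketch only (hypothetical call site; uErrCode and GCPtrFault are placeholders):
   queueing a #PF would combine the interruption-information bits much like the helpers below:

       uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_PF)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
       vmxHCSetPendingEvent(pVCpu, uIntInfo, 0, uErrCode, GCPtrFault);

   The fault address passed last is what ends up in CR2 when the event is injected. */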
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
3037 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042}
3043
3044
3045/**
3046 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 */
3050DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3051{
3052 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3056 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3057}
3058
3059
3060/**
3061 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3106/**
3107 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the general-protection exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120
3121
3122/**
3123 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the stack exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
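/* Typical usage (hypothetical example): a nested-VMX instruction handler that must raise #GP(0)
   would simply do
       vmxHCSetPendingXcptGP(pVCpu, 0);
   and return to the run loop, which injects the pending event on the next VM-entry. */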
3137
3138
3139/**
3140 * Fixes up attributes for the specified segment register.
3141 *
3142 * @param pVCpu The cross context virtual CPU structure.
3143 * @param pSelReg The segment register that needs fixing.
3144 * @param pszRegName The register name (for logging and assertions).
3145 */
3146static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3147{
3148 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3149
3150 /*
3151 * If VT-x marks the segment as unusable, most other bits remain undefined:
3152 * - For CS the L, D and G bits have meaning.
3153 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3154 * - For the remaining data segments no bits are defined.
3155 *
3156     * The present bit and the unusable bit have been observed to be set at the
3157 * same time (the selector was supposed to be invalid as we started executing
3158 * a V8086 interrupt in ring-0).
3159 *
3160 * What should be important for the rest of the VBox code, is that the P bit is
3161 * cleared. Some of the other VBox code recognizes the unusable bit, but
3162     * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
3163 * safe side here, we'll strip off P and other bits we don't care about. If
3164 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3165 *
3166 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3167 */
3168#ifdef VBOX_STRICT
3169 uint32_t const uAttr = pSelReg->Attr.u;
3170#endif
3171
3172 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3173 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3174 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3175
3176#ifdef VBOX_STRICT
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Disable(pVCpu);
3179# endif
3180 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3181# ifdef DEBUG_bird
3182 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3183 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3184 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3185# endif
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Enable(pVCpu);
3188# endif
3189 NOREF(uAttr);
3190#endif
3191 RT_NOREF2(pVCpu, pszRegName);
3192}
3193
3194
3195/**
3196 * Imports a guest segment register from the current VMCS into the guest-CPU
3197 * context.
3198 *
3199 * @param pVCpu The cross context virtual CPU structure.
3200 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3201 *
3202 * @remarks Called with interrupts and/or preemption disabled.
3203 */
3204template<uint32_t const a_iSegReg>
3205DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3206{
3207 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3208     /* Check that the macros we depend upon here and in the exporting counterpart function work: */
3209#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3210 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3211 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3212 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3216 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3217 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3218 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3219 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
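    /* Note: the "ES\0CS\0SS\0DS\0FS\0GS" literal below packs the six register names at a fixed
       3-byte stride, so adding a_iSegReg * 3 selects the matching name for logging/assertions. */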
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
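/* Usage note: this template is instantiated as e.g. vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
   the AssertCompile checks above guarantee that the VMX_VMCS*_GUEST_SEG_* field macros resolve
   to the correct per-register VMCS encodings at compile time, so no runtime lookup is needed. */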
3239
3240
3241/**
3242 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3299 *
3300 * @returns The RIP value.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 *
3303 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3304 * @remarks Do -not- call this function directly!
3305 */
3306DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3310 AssertRC(rc);
3311
3312 pVCpu->cpum.GstCtx.rip = u64Val;
3313
3314 return u64Val;
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3332 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3333 }
3334}
3335
3336
3337/**
3338 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3339 *
3340 * @param pVCpu The cross context virtual CPU structure.
3341 * @param pVmcsInfo The VMCS info. object.
3342 *
3343 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3344 * @remarks Do -not- call this function directly!
3345 */
3346DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3347{
3348 uint64_t fRFlags;
3349 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3350 AssertRC(rc);
3351
3352 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3353 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3354
3355 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3356#ifndef IN_NEM_DARWIN
3357 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3358 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3359     { /* most likely */ }
3360 else
3361 {
3362 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3363 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368}
3369
3370
3371/**
3372 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcsInfo The VMCS info. object.
3376 *
3377 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3378 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3379 * instead!!!
3380 */
3381DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3382{
3383 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3384 {
3385 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3386 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3387 }
3388}
3389
3390
3391#ifndef IN_NEM_DARWIN
3392/**
3393 * Imports the guest TSC_AUX and certain other MSRs from the VMCS into the guest-CPU
3394 * context.
3395 *
3396 * The other MSRs are in the VM-exit MSR-store.
3397 *
3398 * @returns VBox status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param pVmcsInfo The VMCS info. object.
3401 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3402 * unexpected errors). Ignored in NEM/darwin context.
3403 */
3404DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3405{
3406 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3407 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3408 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3409 Assert(pMsrs);
3410 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3411 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
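    /* Each entry in the VM-exit MSR-store area pairs an MSR index (u32Msr) with the value the
       CPU saved on VM-exit (u64Value); the loop below dispatches on the index. */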
3412 for (uint32_t i = 0; i < cMsrs; i++)
3413 {
3414 uint32_t const idMsr = pMsrs[i].u32Msr;
3415 switch (idMsr)
3416 {
3417 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3418 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3419 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3420 default:
3421 {
3422 uint32_t idxLbrMsr;
3423 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3424 if (VM_IS_VMX_LBR(pVM))
3425 {
3426 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3427 {
3428 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3429 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3430 break;
3431 }
3432 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3433 {
3434                     Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3435 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3436 break;
3437 }
3438 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3439 {
3440 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3441 break;
3442 }
3443 /* Fallthru (no break) */
3444 }
3445 pVCpu->cpum.GstCtx.fExtrn = 0;
3446             VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3447 ASMSetFlags(fEFlags);
3448 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3449 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3450 }
3451 }
3452 }
3453 return VINF_SUCCESS;
3454}
3455#endif /* !IN_NEM_DARWIN */
3456
3457
3458/**
3459 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3460 *
3461 * @param pVCpu The cross context virtual CPU structure.
3462 * @param pVmcsInfo The VMCS info. object.
3463 */
3464DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3465{
3466 uint64_t u64Cr0;
3467 uint64_t u64Shadow;
3468 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3469 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3470#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3471 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3472 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3473#else
3474 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3475 {
3476 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3477 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3478 }
3479 else
3480 {
3481 /*
3482 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3483 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3484 * re-construct CR0. See @bugref{9180#c95} for details.
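         *
         * The merge below combines three sources: the hardware CR0 just read, the nested-guest
         * VMCS guest-CR0 field, and the CR0 read shadow, selected per bit by the guest/host
         * masks of the outer and nested VMCS.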
3485 */
3486 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3487 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3488 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3489 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3490 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3491 Assert(u64Cr0 & X86_CR0_NE);
3492 }
3493#endif
3494
3495#ifndef IN_NEM_DARWIN
3496 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3497#endif
3498 CPUMSetGuestCR0(pVCpu, u64Cr0);
3499#ifndef IN_NEM_DARWIN
3500 VMMRZCallRing3Enable(pVCpu);
3501#endif
3502}
3503
3504
3505/**
3506 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3507 *
3508 * @param pVCpu The cross context virtual CPU structure.
3509 */
3510DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3511{
3512 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3513 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3514
3515 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3516 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3517 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3518 && CPUMIsGuestPagingEnabledEx(pCtx)))
3519 {
3520 uint64_t u64Cr3;
3521 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3522 if (pCtx->cr3 != u64Cr3)
3523 {
3524 pCtx->cr3 = u64Cr3;
3525 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3526 }
3527
3528 /*
3529 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3530 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3531 */
3532 if (CPUMIsGuestInPAEModeEx(pCtx))
3533 {
3534 X86PDPE aPaePdpes[4];
3535 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3536 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3537 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3539 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3540 {
3541 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3542 /* PGM now updates PAE PDPTEs while updating CR3. */
3543 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3544 }
3545 }
3546 }
3547}
3548
3549
3550/**
3551 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3552 *
3553 * @param pVCpu The cross context virtual CPU structure.
3554 * @param pVmcsInfo The VMCS info. object.
3555 */
3556DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3557{
3558 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3559 uint64_t u64Cr4;
3560 uint64_t u64Shadow;
3561 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3562 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3563#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3564 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3565 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3566#else
3567 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3568 {
3569 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3570 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3571 }
3572 else
3573 {
3574 /*
3575 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3576 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3577 * re-construct CR4. See @bugref{9180#c95} for details.
3578 */
3579 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3580 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3581 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3582 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3583 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3584 Assert(u64Cr4 & X86_CR4_VMXE);
3585 }
3586#endif
3587 pCtx->cr4 = u64Cr4;
3588}
3589
3590
3591/**
3592 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3593 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3594 */
3595DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3596{
3597 /*
3598 * We must import RIP here to set our EM interrupt-inhibited state.
3599 * We also import RFLAGS as our code that evaluates pending interrupts
3600 * before VM-entry requires it.
3601 */
3602 vmxHCImportGuestRip(pVCpu);
3603 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3604
3605 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3606 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3607 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3608 pVCpu->cpum.GstCtx.rip);
3609 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3610}
3611
3612
3613/**
3614 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3615 * context.
3616 *
3617 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3618 *
3619 * @param pVCpu The cross context virtual CPU structure.
3620 * @param pVmcsInfo The VMCS info. object.
3621 *
3622 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3623 * do not log!
3624 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3625 * instead!!!
3626 */
3627DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3628{
3629 uint32_t u32Val;
3630 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3631 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3632 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3633 if (!u32Val)
3634 {
3635 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3636 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3637 }
3638 else
3639 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3640}
3641
3642
3643/**
3644 * Worker for VMXR0ImportStateOnDemand.
3645 *
3646 * @returns VBox status code.
3647 * @param pVCpu The cross context virtual CPU structure.
3648 * @param pVmcsInfo The VMCS info. object.
3649 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3650 */
3651static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3652{
3653 int rc = VINF_SUCCESS;
3654 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3655 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3656 uint32_t u32Val;
3657
3658 /*
3659     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3660 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3661 * neither are other host platforms.
3662 *
3663 * Committing this temporarily as it prevents BSOD.
3664 *
3665 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3666 */
3667#ifdef RT_OS_WINDOWS
3668 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3669 return VERR_HM_IPE_1;
3670#endif
3671
3672 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3673
3674#ifndef IN_NEM_DARWIN
3675 /*
3676 * We disable interrupts to make the updating of the state and in particular
3677     * the fExtrn modification atomic wrt preemption hooks.
3678 */
3679 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3680#endif
3681
3682 fWhat &= pCtx->fExtrn;
3683 if (fWhat)
3684 {
3685 do
3686 {
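        /* Note: single-iteration do/while(0) block so the nested-VMX import further down can
           bail out with 'break' on failure. */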
3687 if (fWhat & CPUMCTX_EXTRN_RIP)
3688 vmxHCImportGuestRip(pVCpu);
3689
3690 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3691 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3692
3693 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3694 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3695 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3696
3697 if (fWhat & CPUMCTX_EXTRN_RSP)
3698 {
3699 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3700 AssertRC(rc);
3701 }
3702
3703 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3704 {
3705 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3706#ifndef IN_NEM_DARWIN
3707 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3708#else
3709 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3710#endif
3711 if (fWhat & CPUMCTX_EXTRN_CS)
3712 {
3713 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3714 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3715 if (fRealOnV86Active)
3716 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3717 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3718 }
3719 if (fWhat & CPUMCTX_EXTRN_SS)
3720 {
3721 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3722 if (fRealOnV86Active)
3723 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3724 }
3725 if (fWhat & CPUMCTX_EXTRN_DS)
3726 {
3727 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3728 if (fRealOnV86Active)
3729 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3730 }
3731 if (fWhat & CPUMCTX_EXTRN_ES)
3732 {
3733 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3734 if (fRealOnV86Active)
3735 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3736 }
3737 if (fWhat & CPUMCTX_EXTRN_FS)
3738 {
3739 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3740 if (fRealOnV86Active)
3741 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3742 }
3743 if (fWhat & CPUMCTX_EXTRN_GS)
3744 {
3745 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3746 if (fRealOnV86Active)
3747 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3748 }
3749 }
3750
3751 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3752 {
3753 if (fWhat & CPUMCTX_EXTRN_LDTR)
3754 vmxHCImportGuestLdtr(pVCpu);
3755
3756 if (fWhat & CPUMCTX_EXTRN_GDTR)
3757 {
3758 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3759 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3760 pCtx->gdtr.cbGdt = u32Val;
3761 }
3762
3763 /* Guest IDTR. */
3764 if (fWhat & CPUMCTX_EXTRN_IDTR)
3765 {
3766 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3767 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3768 pCtx->idtr.cbIdt = u32Val;
3769 }
3770
3771 /* Guest TR. */
3772 if (fWhat & CPUMCTX_EXTRN_TR)
3773 {
3774#ifndef IN_NEM_DARWIN
3775 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3776 don't need to import that one. */
3777 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3778#endif
3779 vmxHCImportGuestTr(pVCpu);
3780 }
3781 }
3782
3783 if (fWhat & CPUMCTX_EXTRN_DR7)
3784 {
3785#ifndef IN_NEM_DARWIN
3786 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3787#endif
3788 {
3789 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3790 AssertRC(rc);
3791 }
3792 }
3793
3794 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3795 {
3796 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3797 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3798 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3799 pCtx->SysEnter.cs = u32Val;
3800 }
3801
3802#ifndef IN_NEM_DARWIN
3803 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3804 {
3805 if ( pVM->hmr0.s.fAllow64BitGuests
3806 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3807 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3808 }
3809
3810 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3811 {
3812 if ( pVM->hmr0.s.fAllow64BitGuests
3813 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3814 {
3815 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3816 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3817 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3818 }
3819 }
3820
3821 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3822 {
3823 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3824 AssertRCReturn(rc, rc);
3825 }
3826#else
3827 NOREF(pVM);
3828#endif
3829
3830 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3831 {
3832 if (fWhat & CPUMCTX_EXTRN_CR0)
3833 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3834
3835 if (fWhat & CPUMCTX_EXTRN_CR4)
3836 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3837
3838 if (fWhat & CPUMCTX_EXTRN_CR3)
3839 vmxHCImportGuestCr3(pVCpu);
3840 }
3841
3842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3843 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3844 {
3845 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3846 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3847 {
3848 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3849 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3850 if (RT_SUCCESS(rc))
3851 { /* likely */ }
3852 else
3853 break;
3854 }
3855 }
3856#endif
3857 } while (0);
3858
3859 if (RT_SUCCESS(rc))
3860 {
3861 /* Update fExtrn. */
3862 pCtx->fExtrn &= ~fWhat;
3863
3864 /* If everything has been imported, clear the HM keeper bit. */
3865 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3866 {
3867#ifndef IN_NEM_DARWIN
3868 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3869#else
3870 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3871#endif
3872 Assert(!pCtx->fExtrn);
3873 }
3874 }
3875 }
3876#ifndef IN_NEM_DARWIN
3877 else
3878 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3879
3880 /*
3881 * Restore interrupts.
3882 */
3883 ASMSetFlags(fEFlags);
3884#endif
3885
3886 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3887
3888 if (RT_SUCCESS(rc))
3889 { /* likely */ }
3890 else
3891 return rc;
3892
3893 /*
3894 * Honor any pending CR3 updates.
3895 *
3896 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3897 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3898 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3899 *
3900 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3901 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3902 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3903 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3904 *
3905 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3906 *
3907 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3908 */
3909 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3910#ifndef IN_NEM_DARWIN
3911 && VMMRZCallRing3IsEnabled(pVCpu)
3912#endif
3913 )
3914 {
3915 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3916 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3917 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3918 }
3919
3920 return VINF_SUCCESS;
3921}
3922
3923
3924/**
3925 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3926 *
3927 * @returns VBox status code.
3928 * @param pVCpu The cross context virtual CPU structure.
3929 * @param pVmcsInfo The VMCS info. object.
3930 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3931 * in NEM/darwin context.
3932 * @tparam a_fWhat What to import, zero or more bits from
3933 * HMVMX_CPUMCTX_EXTRN_ALL.
3934 */
3935template<uint64_t const a_fWhat>
3936static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3937{
3938 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3939 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3940 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3941 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3942
3943 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3944
3945 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3946
3947    /* RIP and RFLAGS may have been imported already by the post-exit code
3948       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so this part
3949       of the code may have nothing left to fetch here. */
3950 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3951 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3952 {
3953 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3954 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3955
3956 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3957 {
3958 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3959 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3960 else
3961 vmxHCImportGuestCoreRip(pVCpu);
3962 }
3963 }
3964
3965 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3966 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3967 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3968
3969 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3970 {
3971 if (a_fWhat & CPUMCTX_EXTRN_CS)
3972 {
3973 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3974             /** @todo try to get rid of this carp, it smells and is probably never ever
3975 * used: */
3976 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3977 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3978 {
3979 vmxHCImportGuestCoreRip(pVCpu);
3980 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3981 }
3982 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3983 }
3984 if (a_fWhat & CPUMCTX_EXTRN_SS)
3985 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3986 if (a_fWhat & CPUMCTX_EXTRN_DS)
3987 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3988 if (a_fWhat & CPUMCTX_EXTRN_ES)
3989 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3990 if (a_fWhat & CPUMCTX_EXTRN_FS)
3991 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3992 if (a_fWhat & CPUMCTX_EXTRN_GS)
3993 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3994
3995 /* Guest TR.
3996 Real-mode emulation using virtual-8086 mode has the fake TSS
3997 (pRealModeTSS) in TR, don't need to import that one. */
3998#ifndef IN_NEM_DARWIN
3999 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4000 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4001 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4002#else
4003 if (a_fWhat & CPUMCTX_EXTRN_TR)
4004#endif
4005 vmxHCImportGuestTr(pVCpu);
4006
4007#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4008 if (fRealOnV86Active)
4009 {
4010 if (a_fWhat & CPUMCTX_EXTRN_CS)
4011 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4012 if (a_fWhat & CPUMCTX_EXTRN_SS)
4013 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4014 if (a_fWhat & CPUMCTX_EXTRN_DS)
4015 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4016 if (a_fWhat & CPUMCTX_EXTRN_ES)
4017 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4018 if (a_fWhat & CPUMCTX_EXTRN_FS)
4019 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4020 if (a_fWhat & CPUMCTX_EXTRN_GS)
4021 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4022 }
4023#endif
4024 }
4025
4026 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4027 {
4028 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4029 AssertRC(rc);
4030 }
4031
4032 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4033 vmxHCImportGuestLdtr(pVCpu);
4034
4035 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4036 {
4037 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4038 uint32_t u32Val;
4039 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4040 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4041 }
4042
4043 /* Guest IDTR. */
4044 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4045 {
4046 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4047 uint32_t u32Val;
4048 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4049         pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
4050 }
4051
4052 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4053 {
4054#ifndef IN_NEM_DARWIN
4055 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4056#endif
4057 {
4058 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4059 AssertRC(rc);
4060 }
4061 }
4062
4063 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4064 {
4065 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4066 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4067 uint32_t u32Val;
4068 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4069 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4070 }
4071
4072#ifndef IN_NEM_DARWIN
4073 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4074 {
4075 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4076 && pVM->hmr0.s.fAllow64BitGuests)
4077 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4078 }
4079
4080 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4081 {
4082 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4083 && pVM->hmr0.s.fAllow64BitGuests)
4084 {
4085 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4086 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4087 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4088 }
4089 }
4090
4091 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4092 {
4093 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4094 AssertRCReturn(rc1, rc1);
4095 }
4096#else
4097 NOREF(pVM);
4098#endif
4099
4100 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4101 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4102
4103 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4104 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4105
4106 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4107 vmxHCImportGuestCr3(pVCpu);
4108
4109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4110 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4111 {
4112 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4113 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4114 {
4115 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4116 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4117 AssertRCReturn(rc, rc);
4118 }
4119 }
4120#endif
4121
4122 /* Update fExtrn. */
4123 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4124
4125 /* If everything has been imported, clear the HM keeper bit. */
4126 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4127 {
4128#ifndef IN_NEM_DARWIN
4129 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4130#else
4131 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4132#endif
4133 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4134 }
4135
4136 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4137
4138 /*
4139 * Honor any pending CR3 updates.
4140 *
4141 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4142 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4143 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4144 *
4145 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4146 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4147 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4148 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4149 *
4150 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4151 *
4152 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4153 */
4154#ifndef IN_NEM_DARWIN
4155 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4156 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4157 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4158 return VINF_SUCCESS;
4159 ASMSetFlags(fEFlags);
4160#else
4161 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4162 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4163 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4164 return VINF_SUCCESS;
4165 RT_NOREF_PV(fEFlags);
4166#endif
4167
4168 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4169 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4170 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4171 return VINF_SUCCESS;
4172}
4173
4174
4175/**
4176 * Internal state fetcher.
4177 *
4178 * @returns VBox status code.
4179 * @param pVCpu The cross context virtual CPU structure.
4180 * @param pVmcsInfo The VMCS info. object.
4181 * @param pszCaller For logging.
4182 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4183 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4184 * already. This is ORed together with @a a_fWhat when
4185 * calculating what needs fetching (just for safety).
4186 * @tparam  a_fDonePostExit What's ASSUMED to have been retrieved by
4187 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4188 * already. This is ORed together with @a a_fWhat when
4189 * calculating what needs fetching (just for safety).
4190 */
4191template<uint64_t const a_fWhat,
4192 uint64_t const a_fDoneLocal = 0,
4193 uint64_t const a_fDonePostExit = 0
4194#ifndef IN_NEM_DARWIN
4195 | CPUMCTX_EXTRN_INHIBIT_INT
4196 | CPUMCTX_EXTRN_INHIBIT_NMI
4197# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4198 | HMVMX_CPUMCTX_EXTRN_ALL
4199# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4200 | CPUMCTX_EXTRN_RFLAGS
4201# endif
4202#else /* IN_NEM_DARWIN */
4203 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4204#endif /* IN_NEM_DARWIN */
4205>
4206DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4207{
4208 RT_NOREF_PV(pszCaller);
4209 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4210 {
4211#ifndef IN_NEM_DARWIN
4212 /*
4213 * We disable interrupts to make the updating of the state and in particular
4214         * the fExtrn modification atomic wrt preemption hooks.
4215 */
4216 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4217#else
4218 RTCCUINTREG const fEFlags = 0;
4219#endif
4220
4221 /*
4222 * We combine all three parameters and take the (probably) inlined optimized
4223 * code path for the new things specified in a_fWhat.
4224 *
4225 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4226 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4227 * also take the streamlined path when both of these are cleared in fExtrn
4228 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4229 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4230 */
4231 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4232 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4233 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4234 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4235 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4236 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4237 {
4238 int const rc = vmxHCImportGuestStateInner< a_fWhat
4239 & HMVMX_CPUMCTX_EXTRN_ALL
4240 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4241#ifndef IN_NEM_DARWIN
4242 ASMSetFlags(fEFlags);
4243#endif
4244 return rc;
4245 }
4246
4247#ifndef IN_NEM_DARWIN
4248 ASMSetFlags(fEFlags);
4249#endif
4250
4251 /*
4252 * We shouldn't normally get here, but it may happen when executing
4253 * in the debug run-loops. Typically, everything should already have
4254 * been fetched then. Otherwise call the fallback state import function.
4255 */
4256 if (fWhatToDo == 0)
4257 { /* hope the cause was the debug loop or something similar */ }
4258 else
4259 {
4260 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4261 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4262 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4263 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4264 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4265 }
4266 }
4267 return VINF_SUCCESS;
4268}
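/* Illustrative (hypothetical) call site, e.g. a VM-exit handler that only needs RIP and CS:

       int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS>(pVCpu, pVmcsInfo, __FUNCTION__);
       AssertRCReturn(rc, rc);

   The template arguments let the compiler emit a streamlined fetch path for exactly those bits,
   falling back to vmxHCImportGuestStateEx only in the (logged) mismatch case above. */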
4269
4270
4271/**
4272 * Check per-VM and per-VCPU force flag actions that require us to go back to
4273 * ring-3 for one reason or another.
4274 *
4275 * @returns Strict VBox status code (i.e. informational status codes too)
4276 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4277 * ring-3.
4278 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4279 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4280 * interrupts)
4281 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4282 * all EMTs to be in ring-3.
4283 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4284 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4285 * to the EM loop.
4286 *
4287 * @param pVCpu The cross context virtual CPU structure.
4288 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4289 * @param fStepping Whether we are single-stepping the guest using the
4290 * hypervisor debugger.
4291 *
4292 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4293 * is no longer in VMX non-root mode.
4294 */
4295static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4296{
4297#ifndef IN_NEM_DARWIN
4298 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4299#endif
4300
4301 /*
4302 * Update pending interrupts into the APIC's IRR.
4303 */
4304 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4305 APICUpdatePendingInterrupts(pVCpu);
4306
4307 /*
4308 * Anything pending? Should be more likely than not if we're doing a good job.
4309 */
4310 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4311 if ( !fStepping
4312 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4313 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4314 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4315 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4316 return VINF_SUCCESS;
4317
4318     /* Pending PGM CR3 sync. */
4319     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4320 {
4321 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4322 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4323 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4324 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4325 if (rcStrict != VINF_SUCCESS)
4326 {
4327 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4328 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4329 return rcStrict;
4330 }
4331 }
4332
4333 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4334 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4335 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4336 {
4337 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4338 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4339 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4340 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4341 return rc;
4342 }
4343
4344 /* Pending VM request packets, such as hardware interrupts. */
4345 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4346 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4347 {
4348 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4349 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4350 return VINF_EM_PENDING_REQUEST;
4351 }
4352
4353 /* Pending PGM pool flushes. */
4354 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4355 {
4356 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4357 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4358 return VINF_PGM_POOL_FLUSH_PENDING;
4359 }
4360
4361 /* Pending DMA requests. */
4362 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4363 {
4364 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4365 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4366 return VINF_EM_RAW_TO_R3;
4367 }
4368
4369#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4370 /*
4371 * Pending nested-guest events.
4372 *
4373     * Please note the priority of these events is specified and important.
4374 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4375 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4376 */
4377 if (fIsNestedGuest)
4378 {
4379 /* Pending nested-guest APIC-write. */
4380 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4381 {
4382 Log4Func(("Pending nested-guest APIC-write\n"));
4383 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4384 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4385 return rcStrict;
4386 }
4387
4388 /* Pending nested-guest monitor-trap flag (MTF). */
4389 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4390 {
4391 Log4Func(("Pending nested-guest MTF\n"));
4392 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4393 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4394 return rcStrict;
4395 }
4396
4397 /* Pending nested-guest VMX-preemption timer expired. */
4398 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4399 {
4400 Log4Func(("Pending nested-guest preempt timer\n"));
4401 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4402 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4403 return rcStrict;
4404 }
4405 }
4406#else
4407 NOREF(fIsNestedGuest);
4408#endif
4409
4410 return VINF_SUCCESS;
4411}
4412
4413
4414/**
4415 * Converts any TRPM trap into a pending HM event. This is typically used when
4416 * entering from ring-3 (not longjmp returns).
4417 *
4418 * @param pVCpu The cross context virtual CPU structure.
4419 */
4420static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4421{
4422 Assert(TRPMHasTrap(pVCpu));
4423 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4424
4425 uint8_t uVector;
4426 TRPMEVENT enmTrpmEvent;
4427 uint32_t uErrCode;
4428 RTGCUINTPTR GCPtrFaultAddress;
4429 uint8_t cbInstr;
4430 bool fIcebp;
4431
4432 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4433 AssertRC(rc);
4434
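    /* Build the event in VMX event-information format: vector, event type and the valid bit. */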
4435 uint32_t u32IntInfo;
4436 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4437 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4438
4439 rc = TRPMResetTrap(pVCpu);
4440 AssertRC(rc);
4441 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4442 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4443
4444 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4445}
4446
4447
4448/**
4449 * Converts the pending HM event into a TRPM trap.
4450 *
4451 * @param pVCpu The cross context virtual CPU structure.
4452 */
4453static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4454{
4455 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4456
4457 /* If a trap was already pending, we did something wrong! */
4458 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4459
4460 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4461 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4462 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4463
4464 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4465
4466 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4467 AssertRC(rc);
4468
4469 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4470 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4471
4472 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4473 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4474 else
4475 {
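        /* Software interrupts and exceptions raised by INT1 (ICEBP), INT3 or INTO need their instruction length forwarded to TRPM. */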
4476 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4477 switch (uVectorType)
4478 {
4479 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4480 TRPMSetTrapDueToIcebp(pVCpu);
4481 RT_FALL_THRU();
4482 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4483 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4484 {
4485 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4486 || ( uVector == X86_XCPT_BP /* INT3 */
4487 || uVector == X86_XCPT_OF /* INTO */
4488 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4489 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4490 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4491 break;
4492 }
4493 }
4494 }
4495
4496 /* We're now done converting the pending event. */
4497 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4498}
4499
4500
4501/**
4502 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4503 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4504 *
4505 * @param pVCpu The cross context virtual CPU structure.
4506 * @param pVmcsInfo The VMCS info. object.
4507 */
4508static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4509{
4510 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4511 {
4512 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4513 {
4514 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4515 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4516 AssertRC(rc);
4517 }
4518    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4519}
4520
4521
4522/**
4523 * Clears the interrupt-window exiting control in the VMCS.
4524 *
4525 * @param pVCpu The cross context virtual CPU structure.
4526 * @param pVmcsInfo The VMCS info. object.
4527 */
4528DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4529{
4530 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4531 {
4532 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4533 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4534 AssertRC(rc);
4535 }
4536}
4537
4538
4539/**
4540 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4541 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4542 *
4543 * @param pVCpu The cross context virtual CPU structure.
4544 * @param pVmcsInfo The VMCS info. object.
4545 */
4546static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4547{
4548 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4549 {
4550 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4551 {
4552 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4553 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4554 AssertRC(rc);
4555 Log4Func(("Setup NMI-window exiting\n"));
4556 }
4557 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4558}
4559
4560
4561/**
4562 * Clears the NMI-window exiting control in the VMCS.
4563 *
4564 * @param pVCpu The cross context virtual CPU structure.
4565 * @param pVmcsInfo The VMCS info. object.
4566 */
4567DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4568{
4569 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4570 {
4571 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4572 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4573 AssertRC(rc);
4574 }
4575}
4576
4577
4578/**
4579 * Injects an event into the guest upon VM-entry by updating the relevant fields
4580 * in the VM-entry area in the VMCS.
4581 *
4582 * @returns Strict VBox status code (i.e. informational status codes too).
4583 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4584 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4585 *
4586 * @param pVCpu The cross context virtual CPU structure.
4587 * @param pVmcsInfo The VMCS info object.
4588 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4589 * @param pEvent The event being injected.
4590 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4591 * will be updated if necessary. This cannot be NULL.
4592 * @param fStepping Whether we're single-stepping guest execution and should
4593 * return VINF_EM_DBG_STEPPED if the event is injected
4594 * directly (registers modified by us, not by hardware on
4595 * VM-entry).
4596 */
4597static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4598 bool fStepping, uint32_t *pfIntrState)
4599{
4600 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4601 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4602 Assert(pfIntrState);
4603
4604#ifdef IN_NEM_DARWIN
4605 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4606#endif
4607
4608 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4609 uint32_t u32IntInfo = pEvent->u64IntInfo;
4610 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4611 uint32_t const cbInstr = pEvent->cbInstr;
4612 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4613 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4614 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4615
4616#ifdef VBOX_STRICT
4617 /*
4618 * Validate the error-code-valid bit for hardware exceptions.
4619 * No error codes for exceptions in real-mode.
4620 *
4621 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4622 */
4623 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4624 && !CPUMIsGuestInRealModeEx(pCtx))
4625 {
4626 switch (uVector)
4627 {
4628 case X86_XCPT_PF:
4629 case X86_XCPT_DF:
4630 case X86_XCPT_TS:
4631 case X86_XCPT_NP:
4632 case X86_XCPT_SS:
4633 case X86_XCPT_GP:
4634 case X86_XCPT_AC:
4635 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4636 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4637 RT_FALL_THRU();
4638 default:
4639 break;
4640 }
4641 }
4642
4643 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4644 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4645 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4646#endif
4647
4648 RT_NOREF(uVector);
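    /* Injection statistics: exceptions and NMIs are counted per vector, everything else is counted as an injected IRQ. */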
4649 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4650 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4651 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4652 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4653 {
4654 Assert(uVector <= X86_XCPT_LAST);
4655 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4656 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4657 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4658 }
4659 else
4660 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4661
4662 /*
4663 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4664 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4665 * interrupt handler in the (real-mode) guest.
4666 *
4667 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4668 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4669 */
4670 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4671 {
4672#ifndef IN_NEM_DARWIN
4673 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4674#endif
4675 {
4676 /*
4677 * For CPUs with unrestricted guest execution enabled and with the guest
4678 * in real-mode, we must not set the deliver-error-code bit.
4679 *
4680 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4681 */
4682 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4683 }
4684#ifndef IN_NEM_DARWIN
4685 else
4686 {
4687 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4688 Assert(PDMVmmDevHeapIsEnabled(pVM));
4689 Assert(pVM->hm.s.vmx.pRealModeTSS);
4690 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4691
4692             /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
4693 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4694 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4695 AssertRCReturn(rc2, rc2);
4696
4697 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4698 size_t const cbIdtEntry = sizeof(X86IDTR16);
4699 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4700 {
4701 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4702 if (uVector == X86_XCPT_DF)
4703 return VINF_EM_RESET;
4704
4705 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4706 No error codes for exceptions in real-mode. */
4707 if (uVector == X86_XCPT_GP)
4708 {
4709 static HMEVENT const s_EventXcptDf
4710 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4711 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4712 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4713 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4714 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4715 }
4716
4717 /*
4718 * If we're injecting an event with no valid IDT entry, inject a #GP.
4719 * No error codes for exceptions in real-mode.
4720 *
4721 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4722 */
4723 static HMEVENT const s_EventXcptGp
4724 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4725 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4726 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4727 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4728 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4729 }
4730
4731 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4732 uint16_t uGuestIp = pCtx->ip;
4733 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4734 {
4735 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4736                 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4737 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4738 }
4739 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4740 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4741
4742 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4743 X86IDTR16 IdtEntry;
4744 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4745 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4746 AssertRCReturn(rc2, rc2);
4747
4748 /* Construct the stack frame for the interrupt/exception handler. */
4749 VBOXSTRICTRC rcStrict;
4750 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4751 if (rcStrict == VINF_SUCCESS)
4752 {
4753 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4754 if (rcStrict == VINF_SUCCESS)
4755 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4756 }
4757
4758 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4759 if (rcStrict == VINF_SUCCESS)
4760 {
4761 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4762 pCtx->rip = IdtEntry.offSel;
4763 pCtx->cs.Sel = IdtEntry.uSel;
4764 pCtx->cs.ValidSel = IdtEntry.uSel;
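                /* Real-mode code segment base = selector * 16 (cbIdtEntry happens to be 4, hence the shift below). */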
4765 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4766 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4767 && uVector == X86_XCPT_PF)
4768 pCtx->cr2 = GCPtrFault;
4769
4770 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4771 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4772 | HM_CHANGED_GUEST_RSP);
4773
4774 /*
4775 * If we delivered a hardware exception (other than an NMI) and if there was
4776 * block-by-STI in effect, we should clear it.
4777 */
4778 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4779 {
4780 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4781 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4782 Log4Func(("Clearing inhibition due to STI\n"));
4783 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4784 }
4785
4786 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4787 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4788
4789 /*
4790 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4791 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4792 */
4793 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4794
4795 /*
4796 * If we eventually support nested-guest execution without unrestricted guest execution,
4797 * we should set fInterceptEvents here.
4798 */
4799 Assert(!fIsNestedGuest);
4800
4801 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4802 if (fStepping)
4803 rcStrict = VINF_EM_DBG_STEPPED;
4804 }
4805 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4806 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4807 return rcStrict;
4808 }
4809#else
4810 RT_NOREF(pVmcsInfo);
4811#endif
4812 }
4813
4814 /*
4815 * Validate.
4816 */
4817 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4818 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4819
4820 /*
4821 * Inject the event into the VMCS.
4822 */
4823 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4824 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4825 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4826 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4827 AssertRC(rc);
4828
4829 /*
4830 * Update guest CR2 if this is a page-fault.
4831 */
4832 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4833 pCtx->cr2 = GCPtrFault;
4834
4835 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4836 return VINF_SUCCESS;
4837}
4838
4839
4840/**
4841 * Evaluates the event to be delivered to the guest and sets it as the pending
4842 * event.
4843 *
4844 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4845 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4846 * NOT restore these force-flags.
4847 *
4848 * @returns Strict VBox status code (i.e. informational status codes too).
4849 * @param pVCpu The cross context virtual CPU structure.
4850 * @param pVmcsInfo The VMCS information structure.
4851 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4852 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4853 */
4854static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4855{
4856 Assert(pfIntrState);
4857 Assert(!TRPMHasTrap(pVCpu));
4858
4859 /*
4860 * Compute/update guest-interruptibility state related FFs.
4861 * The FFs will be used below while evaluating events to be injected.
4862 */
4863 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4864
4865 /*
4866 * Evaluate if a new event needs to be injected.
4867 * An event that's already pending has already performed all necessary checks.
4868 */
4869 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4870 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4871 {
4872 /** @todo SMI. SMIs take priority over NMIs. */
4873
4874 /*
4875 * NMIs.
4876 * NMIs take priority over external interrupts.
4877 */
4878#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4879 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4880#endif
4881 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4882 {
4883 /*
4884 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4885 *
4886 * For a nested-guest, the FF always indicates the outer guest's ability to
4887 * receive an NMI while the guest-interruptibility state bit depends on whether
4888 * the nested-hypervisor is using virtual-NMIs.
4889 */
4890 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4891 {
4892#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4893 if ( fIsNestedGuest
4894 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4895 return IEMExecVmxVmexitXcptNmi(pVCpu);
4896#endif
4897 vmxHCSetPendingXcptNmi(pVCpu);
4898 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4899 Log4Func(("NMI pending injection\n"));
4900
4901 /* We've injected the NMI, bail. */
4902 return VINF_SUCCESS;
4903 }
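            /* NMI delivery is currently inhibited; for the outer guest request NMI-window exiting so we get notified when it can be injected. */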
4904 if (!fIsNestedGuest)
4905 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4906 }
4907
4908 /*
4909 * External interrupts (PIC/APIC).
4910 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4911 * We cannot re-request the interrupt from the controller again.
4912 */
4913 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4914 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4915 {
4916 Assert(!DBGFIsStepping(pVCpu));
4917 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4918 AssertRC(rc);
4919
4920 /*
4921 * We must not check EFLAGS directly when executing a nested-guest, use
4922 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4923 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4924 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4925 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4926 *
4927 * See Intel spec. 25.4.1 "Event Blocking".
4928 */
4929 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4930 {
4931#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4932 if ( fIsNestedGuest
4933 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4934 {
4935 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4936 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4937 return rcStrict;
4938 }
4939#endif
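                /* Fetch the pending interrupt vector from the PIC/APIC via PDM. */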
4940 uint8_t u8Interrupt;
4941 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4942 if (RT_SUCCESS(rc))
4943 {
4944#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4945 if ( fIsNestedGuest
4946 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4947 {
4948 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4949 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4950 return rcStrict;
4951 }
4952#endif
4953 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4954 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4955 }
4956 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4957 {
4958 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4959
4960 if ( !fIsNestedGuest
4961 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4962 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4963 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4964
4965 /*
4966 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4967 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4968 * need to re-set this force-flag here.
4969 */
4970 }
4971 else
4972 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4973
4974 /* We've injected the interrupt or taken necessary action, bail. */
4975 return VINF_SUCCESS;
4976 }
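            /* Interrupts are currently blocked (e.g. EFLAGS.IF is clear); for the outer guest request interrupt-window exiting so we can inject the interrupt once it is unblocked. */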
4977 if (!fIsNestedGuest)
4978 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4979 }
4980 }
4981 else if (!fIsNestedGuest)
4982 {
4983 /*
4984 * An event is being injected or we are in an interrupt shadow. Check if another event is
4985 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4986 * the pending event.
4987 */
4988 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4989 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4990 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4991 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4992 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4993 }
4994 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4995
4996 return VINF_SUCCESS;
4997}
4998
4999
5000/**
5001 * Injects any pending events into the guest if the guest is in a state to
5002 * receive them.
5003 *
5004 * @returns Strict VBox status code (i.e. informational status codes too).
5005 * @param pVCpu The cross context virtual CPU structure.
5006 * @param pVmcsInfo The VMCS information structure.
5007 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5008 * @param fIntrState The VT-x guest-interruptibility state.
5009 * @param fStepping Whether we are single-stepping the guest using the
5010 * hypervisor debugger and should return
5011 * VINF_EM_DBG_STEPPED if the event was dispatched
5012 * directly.
5013 */
5014static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5015 uint32_t fIntrState, bool fStepping)
5016{
5017 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5018#ifndef IN_NEM_DARWIN
5019 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5020#endif
5021
5022#ifdef VBOX_STRICT
5023 /*
5024 * Verify guest-interruptibility state.
5025 *
5026 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5027 * since injecting an event may modify the interruptibility state and we must thus always
5028 * use fIntrState.
5029 */
5030 {
5031 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5032 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5033 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5034 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5035         Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
5036 Assert(!TRPMHasTrap(pVCpu));
5037 NOREF(fBlockMovSS); NOREF(fBlockSti);
5038 }
5039#endif
5040
5041 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5042 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5043 {
5044 /*
5045 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5046 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5047 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5048 *
5049 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5050 */
5051 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5052#ifdef VBOX_STRICT
5053 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5054 {
5055 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5056 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5057 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5058 }
5059 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5060 {
5061 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5062 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5063 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5064 }
5065#endif
5066 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5067 uIntType));
5068
5069 /*
5070 * Inject the event and get any changes to the guest-interruptibility state.
5071 *
5072          * The guest-interruptibility state may need to be updated if we inject the event
5073          * into the guest IDT ourselves (when injecting software interrupts into a real-on-v86 guest).
5074 */
5075 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5076 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5077
5078 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5079 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5080 else
5081 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5082 }
5083
5084 /*
5085     * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5086     * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5087 */
5088 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5089 && !fIsNestedGuest)
5090 {
5091 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5092
5093 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5094 {
5095 /*
5096 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5097 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5098 */
5099 Assert(!DBGFIsStepping(pVCpu));
5100 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5101 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5102 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5103 AssertRC(rc);
5104 }
5105 else
5106 {
5107 /*
5108             * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5109             * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5110             * vmxHCExportSharedDebugState takes care of this case (as well as the case where we
5111             * use MTF), so just make sure it's called before executing guest code.
5112 */
5113 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5114 }
5115 }
5116     /* else: for nested-guests this is currently handled while merging VMCS controls. */
5117
5118 /*
5119 * Finally, update the guest-interruptibility state.
5120 *
5121     * This is required for real-on-v86 software interrupt injection, for pending
5122     * debug exceptions, as well as for updates to the guest state from ring-3 (IEM).
5123 */
5124 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5125 AssertRC(rc);
5126
5127 /*
5128 * There's no need to clear the VM-entry interruption-information field here if we're not
5129 * injecting anything. VT-x clears the valid bit on every VM-exit.
5130 *
5131 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5132 */
5133
5134 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5135 return rcStrict;
5136}
5137
5138
5139/**
5140 * Tries to determine what part of the guest-state VT-x has deemed invalid
5141 * and updates the error record fields accordingly.
5142 *
5143 * @returns VMX_IGS_* error codes.
5144 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5145 * wrong with the guest state.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure.
5148 * @param pVmcsInfo The VMCS info. object.
5149 *
5150 * @remarks This function assumes our cache of the VMCS controls
5151 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5152 */
5153static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5154{
5155#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5156#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5157
5158 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5159 uint32_t uError = VMX_IGS_ERROR;
5160 uint32_t u32IntrState = 0;
5161#ifndef IN_NEM_DARWIN
5162 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5163 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5164#else
5165 bool const fUnrestrictedGuest = true;
5166#endif
5167 do
5168 {
5169 int rc;
5170
5171 /*
5172 * Guest-interruptibility state.
5173 *
5174         * Read this first so that any check that fails prior to those that actually
5175         * require the guest-interruptibility state still reflects the correct
5176         * VMCS value and avoids causing further confusion.
5177 */
5178 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5179 AssertRC(rc);
5180
5181 uint32_t u32Val;
5182 uint64_t u64Val;
5183
5184 /*
5185 * CR0.
5186 */
5187 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5188 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5189 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
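        /* Bits set in fSetCr0 must be 1 in the guest CR0, while any bit not set in fZapCr0 must be 0 (see the checks below). */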
5190 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5191 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5192 if (fUnrestrictedGuest)
5193 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5194
5195 uint64_t u64GuestCr0;
5196 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5197 AssertRC(rc);
5198 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5199 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5200 if ( !fUnrestrictedGuest
5201 && (u64GuestCr0 & X86_CR0_PG)
5202 && !(u64GuestCr0 & X86_CR0_PE))
5203 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5204
5205 /*
5206 * CR4.
5207 */
5208 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5209 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5210 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5211
5212 uint64_t u64GuestCr4;
5213 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5214 AssertRC(rc);
5215 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5216 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5217
5218 /*
5219 * IA32_DEBUGCTL MSR.
5220 */
5221 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5222 AssertRC(rc);
5223 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5224 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5225 {
5226 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5227 }
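        /* Remember IA32_DEBUGCTL for the pending-debug-exception (BS/BTF) checks further below. */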
5228 uint64_t u64DebugCtlMsr = u64Val;
5229
5230#ifdef VBOX_STRICT
5231 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5232 AssertRC(rc);
5233 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5234#endif
5235 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5236
5237 /*
5238 * RIP and RFLAGS.
5239 */
5240 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5241 AssertRC(rc);
5242         /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code with VM-exits that don't update it). */
5243 if ( !fLongModeGuest
5244 || !pCtx->cs.Attr.n.u1Long)
5245 {
5246 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5247 }
5248 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5249 * must be identical if the "IA-32e mode guest" VM-entry
5250 * control is 1 and CS.L is 1. No check applies if the
5251 * CPU supports 64 linear-address bits. */
5252
5253 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5254 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5255 AssertRC(rc);
5256         HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
5257 VMX_IGS_RFLAGS_RESERVED);
5258 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5259 uint32_t const u32Eflags = u64Val;
5260
5261 if ( fLongModeGuest
5262 || ( fUnrestrictedGuest
5263 && !(u64GuestCr0 & X86_CR0_PE)))
5264 {
5265 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5266 }
5267
5268 uint32_t u32EntryInfo;
5269 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5270 AssertRC(rc);
5271 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5272 {
5273 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5274 }
5275
5276 /*
5277 * 64-bit checks.
5278 */
5279 if (fLongModeGuest)
5280 {
5281 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5282 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5283 }
5284
5285 if ( !fLongModeGuest
5286 && (u64GuestCr4 & X86_CR4_PCIDE))
5287 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5288
5289 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5290 * 51:32 beyond the processor's physical-address width are 0. */
5291
5292 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5293 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5294 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5295
5296#ifndef IN_NEM_DARWIN
5297 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5298 AssertRC(rc);
5299 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5300
5301 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5302 AssertRC(rc);
5303 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5304#endif
5305
5306 /*
5307 * PERF_GLOBAL MSR.
5308 */
5309 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5310 {
5311 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5312 AssertRC(rc);
5313 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5314 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5315 }
5316
5317 /*
5318 * PAT MSR.
5319 */
5320 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5321 {
5322 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5323 AssertRC(rc);
5324             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
5325 for (unsigned i = 0; i < 8; i++)
5326 {
5327 uint8_t u8Val = (u64Val & 0xff);
5328 if ( u8Val != 0 /* UC */
5329 && u8Val != 1 /* WC */
5330 && u8Val != 4 /* WT */
5331 && u8Val != 5 /* WP */
5332 && u8Val != 6 /* WB */
5333 && u8Val != 7 /* UC- */)
5334 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5335 u64Val >>= 8;
5336 }
5337 }
5338
5339 /*
5340 * EFER MSR.
5341 */
5342 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5343 {
5344 Assert(g_fHmVmxSupportsVmcsEfer);
5345 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5346 AssertRC(rc);
5347 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5348 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5349 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5350 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5351 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5352 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5353 * iemVmxVmentryCheckGuestState(). */
5354 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5355 || !(u64GuestCr0 & X86_CR0_PG)
5356 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5357 VMX_IGS_EFER_LMA_LME_MISMATCH);
5358 }
5359
5360 /*
5361 * Segment registers.
5362 */
5363 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5364 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5365 if (!(u32Eflags & X86_EFL_VM))
5366 {
5367 /* CS */
5368 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5369 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5370 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5371 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5372 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5373 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5374 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5375 /* CS cannot be loaded with NULL in protected mode. */
5376 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5377 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5378 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5379 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5380 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5381 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5382 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5383 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5384 else
5385 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5386
5387 /* SS */
5388 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5389 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5390 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5391 if ( !(pCtx->cr0 & X86_CR0_PE)
5392 || pCtx->cs.Attr.n.u4Type == 3)
5393 {
5394 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5395 }
5396
5397 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5398 {
5399 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5400 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5401 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5402 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5403 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5404 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5405 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5406 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5407 }
5408
5409 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5410 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5411 {
5412 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5413 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5414 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5415 || pCtx->ds.Attr.n.u4Type > 11
5416 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5417 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5418 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5419 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5420 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5421 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5422 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5423 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5424 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5425 }
5426 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5427 {
5428 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5429 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5430 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5431 || pCtx->es.Attr.n.u4Type > 11
5432 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5433 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5434 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5435 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5436 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5437 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5438 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5439 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5440 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5441 }
5442 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5443 {
5444 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5445 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5446 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5447 || pCtx->fs.Attr.n.u4Type > 11
5448 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5449 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5450 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5451 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5452 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5453 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5454 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5455 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5456 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5457 }
5458 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5459 {
5460 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5461 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5462 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5463 || pCtx->gs.Attr.n.u4Type > 11
5464 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5465 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5466 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5467 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5468 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5469 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5470 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5471 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5472 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5473 }
5474 /* 64-bit capable CPUs. */
5475 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5476 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5477 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5478 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5479 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5480 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5481 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5482 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5483 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5484 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5485 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5486 }
5487 else
5488 {
5489 /* V86 mode checks. */
5490 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5491 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5492 {
5493 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5494 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5495 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5496 }
5497 else
5498 {
5499 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5500 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5501 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5502 }
5503
5504 /* CS */
5505 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5506 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5507 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5508 /* SS */
5509 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5510 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5511 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5512 /* DS */
5513 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5514 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5515 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5516 /* ES */
5517 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5518 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5519 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5520 /* FS */
5521 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5522 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5523 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5524 /* GS */
5525 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5526 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5527 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5528 /* 64-bit capable CPUs. */
5529 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5530 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5531 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5532 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5533 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5534 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5535 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5536 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5537 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5538 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5539 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5540 }
5541
5542 /*
5543 * TR.
5544 */
5545 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5546 /* 64-bit capable CPUs. */
5547 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5548 if (fLongModeGuest)
5549 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5550 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5551 else
5552 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5553 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5554 VMX_IGS_TR_ATTR_TYPE_INVALID);
5555 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5556 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5557 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5558 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5559 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5560 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5561 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5562 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5563
5564 /*
5565 * GDTR and IDTR (64-bit capable checks).
5566 */
5567 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5568 AssertRC(rc);
5569 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5570
5571 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5572 AssertRC(rc);
5573 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5574
5575 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5576 AssertRC(rc);
5577 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5578
5579 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5580 AssertRC(rc);
5581 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5582
5583 /*
5584 * Guest Non-Register State.
5585 */
5586 /* Activity State. */
5587 uint32_t u32ActivityState;
5588 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5589 AssertRC(rc);
5590 HMVMX_CHECK_BREAK( !u32ActivityState
5591 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5592 VMX_IGS_ACTIVITY_STATE_INVALID);
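        /* The HLT activity state is only valid when SS.DPL is 0. */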
5593 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5594 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5595
5596 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5597 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5598 {
5599 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5600 }
5601
5602         /** @todo Activity state and injecting interrupts. Left as a todo since we
5603          *        currently don't use any activity state other than ACTIVE. */
5604
5605 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5606 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5607
5608 /* Guest interruptibility-state. */
5609 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5610 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5611 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5612 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5613 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5614 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5615 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5616 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5617 {
5618 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5619 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5620 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5621 }
5622 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5623 {
5624 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5625 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5626 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5627 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5628 }
5629 /** @todo Assumes the processor is not in SMM. */
5630 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5631 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5632 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5633 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5634 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5635 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5636 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5637 {
5638 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5639 }
5640
5641 /* Pending debug exceptions. */
5642 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5643 AssertRC(rc);
5644 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5645 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5646 u32Val = u64Val; /* For pending debug exceptions checks below. */
5647
5648 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5649 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5650 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5651 {
5652 if ( (u32Eflags & X86_EFL_TF)
5653 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5654 {
5655 /* Bit 14 is PendingDebug.BS. */
5656 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5657 }
5658 if ( !(u32Eflags & X86_EFL_TF)
5659 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5660 {
5661 /* Bit 14 is PendingDebug.BS. */
5662 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5663 }
5664 }
5665
5666#ifndef IN_NEM_DARWIN
5667 /* VMCS link pointer. */
5668 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5669 AssertRC(rc);
5670 if (u64Val != UINT64_C(0xffffffffffffffff))
5671 {
5672 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5673 /** @todo Bits beyond the processor's physical-address width MBZ. */
5674 /** @todo SMM checks. */
5675 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5676 Assert(pVmcsInfo->pvShadowVmcs);
5677 VMXVMCSREVID VmcsRevId;
5678 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5679 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5680 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5681 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5682 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5683 }
5684
5685 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5686 * not using nested paging? */
5687 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5688 && !fLongModeGuest
5689 && CPUMIsGuestInPAEModeEx(pCtx))
5690 {
5691 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5692 AssertRC(rc);
5693 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5694
5695 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5696 AssertRC(rc);
5697 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5698
5699 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5700 AssertRC(rc);
5701 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5702
5703 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5704 AssertRC(rc);
5705 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5706 }
5707#endif
5708
5709 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5710 if (uError == VMX_IGS_ERROR)
5711 uError = VMX_IGS_REASON_NOT_FOUND;
5712 } while (0);
5713
5714 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5715 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5716 return uError;
5717
5718#undef HMVMX_ERROR_BREAK
5719#undef HMVMX_CHECK_BREAK
5720}
5721
5722
5723#ifndef HMVMX_USE_FUNCTION_TABLE
5724/**
5725 * Handles a guest VM-exit from hardware-assisted VMX execution.
5726 *
5727 * @returns Strict VBox status code (i.e. informational status codes too).
5728 * @param pVCpu The cross context virtual CPU structure.
5729 * @param pVmxTransient The VMX-transient structure.
5730 */
5731DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5732{
5733#ifdef DEBUG_ramshankar
5734# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5735 do { \
5736 if (a_fSave != 0) \
5737 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5738 VBOXSTRICTRC rcStrict = a_CallExpr; \
5739 if (a_fSave != 0) \
5740 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5741 return rcStrict; \
5742 } while (0)
5743#else
5744# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5745#endif
5746 uint32_t const uExitReason = pVmxTransient->uExitReason;
5747 switch (uExitReason)
5748 {
5749 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5750 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5751 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5752 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5753 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5754 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5755 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5756 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5757 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5758 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5759 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5760 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5761 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5762 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5763 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5764 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5765 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5766 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5767 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5768 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5769 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5770 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5771 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5772 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5773 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5774 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5775 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5776 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5777 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5778 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5779#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5780 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5781 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5782 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5783 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5784 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5785 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5786 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5787 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5788 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5789 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5790#else
5791 case VMX_EXIT_VMCLEAR:
5792 case VMX_EXIT_VMLAUNCH:
5793 case VMX_EXIT_VMPTRLD:
5794 case VMX_EXIT_VMPTRST:
5795 case VMX_EXIT_VMREAD:
5796 case VMX_EXIT_VMRESUME:
5797 case VMX_EXIT_VMWRITE:
5798 case VMX_EXIT_VMXOFF:
5799 case VMX_EXIT_VMXON:
5800 case VMX_EXIT_INVVPID:
5801 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5802#endif
5803#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5804 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5805#else
5806 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5807#endif
5808
5809 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5810 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5811 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5812
5813 case VMX_EXIT_INIT_SIGNAL:
5814 case VMX_EXIT_SIPI:
5815 case VMX_EXIT_IO_SMI:
5816 case VMX_EXIT_SMI:
5817 case VMX_EXIT_ERR_MSR_LOAD:
5818 case VMX_EXIT_ERR_MACHINE_CHECK:
5819 case VMX_EXIT_PML_FULL:
5820 case VMX_EXIT_VIRTUALIZED_EOI:
5821 case VMX_EXIT_GDTR_IDTR_ACCESS:
5822 case VMX_EXIT_LDTR_TR_ACCESS:
5823 case VMX_EXIT_APIC_WRITE:
5824 case VMX_EXIT_RDRAND:
5825 case VMX_EXIT_RSM:
5826 case VMX_EXIT_VMFUNC:
5827 case VMX_EXIT_ENCLS:
5828 case VMX_EXIT_RDSEED:
5829 case VMX_EXIT_XSAVES:
5830 case VMX_EXIT_XRSTORS:
5831 case VMX_EXIT_UMWAIT:
5832 case VMX_EXIT_TPAUSE:
5833 case VMX_EXIT_LOADIWKEY:
5834 default:
5835 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5836 }
5837#undef VMEXIT_CALL_RET
5838}
5839#endif /* !HMVMX_USE_FUNCTION_TABLE */
5840
5841
5842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5843/**
5844 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5845 *
5846 * @returns Strict VBox status code (i.e. informational status codes too).
5847 * @param pVCpu The cross context virtual CPU structure.
5848 * @param pVmxTransient The VMX-transient structure.
5849 */
5850DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5851{
5852#ifdef DEBUG_ramshankar
5853# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5854 do { \
5855 if (a_fSave != 0) \
5856 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5857 VBOXSTRICTRC rcStrict = a_CallExpr; \
5858 return rcStrict; \
5859 } while (0)
5860#else
5861# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5862#endif
5863
5864 uint32_t const uExitReason = pVmxTransient->uExitReason;
5865 switch (uExitReason)
5866 {
5867# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5868 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
5869 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
5870# else
5871 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5872 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5873# endif
5874 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
5875 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
5876 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
5877
5878 /*
5879 * We shouldn't direct host physical interrupts to the nested-guest.
5880 */
5881 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5882
5883 /*
5884 * Instructions that cause VM-exits unconditionally, or whose intercept condition
5885 * is taken solely from the nested hypervisor (meaning that if the VM-exit
5886 * happens, it's guaranteed to be a nested-guest VM-exit).
5887 *
5888 * - Provides VM-exit instruction length ONLY.
5889 */
5890 case VMX_EXIT_CPUID: /* Unconditional. */
5891 case VMX_EXIT_VMCALL:
5892 case VMX_EXIT_GETSEC:
5893 case VMX_EXIT_INVD:
5894 case VMX_EXIT_XSETBV:
5895 case VMX_EXIT_VMLAUNCH:
5896 case VMX_EXIT_VMRESUME:
5897 case VMX_EXIT_VMXOFF:
5898 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5899 case VMX_EXIT_VMFUNC:
5900 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
5901
5902 /*
5903 * Instructions that cause VM-exits unconditionally, or whose intercept condition
5904 * is taken solely from the nested hypervisor (meaning that if the VM-exit
5905 * happens, it's guaranteed to be a nested-guest VM-exit).
5906 *
5907 * - Provides VM-exit instruction length.
5908 * - Provides VM-exit information.
5909 * - Optionally provides Exit qualification.
5910 *
5911 * Since Exit qualification is 0 for all VM-exits where it is not
5912 * applicable, reading and passing it to the guest should produce
5913 * defined behavior.
5914 *
5915 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5916 */
5917 case VMX_EXIT_INVEPT: /* Unconditional. */
5918 case VMX_EXIT_INVVPID:
5919 case VMX_EXIT_VMCLEAR:
5920 case VMX_EXIT_VMPTRLD:
5921 case VMX_EXIT_VMPTRST:
5922 case VMX_EXIT_VMXON:
5923 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5924 case VMX_EXIT_LDTR_TR_ACCESS:
5925 case VMX_EXIT_RDRAND:
5926 case VMX_EXIT_RDSEED:
5927 case VMX_EXIT_XSAVES:
5928 case VMX_EXIT_XRSTORS:
5929 case VMX_EXIT_UMWAIT:
5930 case VMX_EXIT_TPAUSE:
5931 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
5932
5933 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
5934 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
5935 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
5936 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
5937 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
5938 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
5939 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
5940 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
5941 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
5942 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
5943 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
5944 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
5945 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
5946 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
5947 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
5948 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
5949 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
5950 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
5951 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
5952
5953 case VMX_EXIT_PREEMPT_TIMER:
5954 {
5955 /** @todo NSTVMX: Preempt timer. */
5956 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5957 }
5958
5959 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
5960 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
5961
5962 case VMX_EXIT_VMREAD:
5963 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
5964
5965 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
5966 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
5967
5968 case VMX_EXIT_INIT_SIGNAL:
5969 case VMX_EXIT_SIPI:
5970 case VMX_EXIT_IO_SMI:
5971 case VMX_EXIT_SMI:
5972 case VMX_EXIT_ERR_MSR_LOAD:
5973 case VMX_EXIT_ERR_MACHINE_CHECK:
5974 case VMX_EXIT_PML_FULL:
5975 case VMX_EXIT_RSM:
5976 default:
5977 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5978 }
5979#undef VMEXIT_CALL_RET
5980}
5981#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5982
5983
5984/** @name VM-exit helpers.
5985 * @{
5986 */
5987/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5988/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5989/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5990
5991/** Macro for VM-exits called unexpectedly. */
5992#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5993 do { \
5994 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5995 return VERR_VMX_UNEXPECTED_EXIT; \
5996 } while (0)
5997
5998#ifdef VBOX_STRICT
5999# ifndef IN_NEM_DARWIN
6000/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6001# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6002 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6003
6004# define HMVMX_ASSERT_PREEMPT_CPUID() \
6005 do { \
6006 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6007 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6008 } while (0)
6009
6010# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6011 do { \
6012 AssertPtr((a_pVCpu)); \
6013 AssertPtr((a_pVmxTransient)); \
6014 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6015 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6016 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6017 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6018 Assert((a_pVmxTransient)->pVmcsInfo); \
6019 Assert(ASMIntAreEnabled()); \
6020 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6021 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6022 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6023 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6024 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6025 HMVMX_ASSERT_PREEMPT_CPUID(); \
6026 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6027 } while (0)
6028# else
6029# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6030# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6031# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6032 do { \
6033 AssertPtr((a_pVCpu)); \
6034 AssertPtr((a_pVmxTransient)); \
6035 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6036 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6037 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6038 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6039 Assert((a_pVmxTransient)->pVmcsInfo); \
6040 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6041 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6042 } while (0)
6043# endif
6044
6045# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6046 do { \
6047 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6048 Assert((a_pVmxTransient)->fIsNestedGuest); \
6049 } while (0)
6050
6051# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6052 do { \
6053 Log4Func(("\n")); \
6054 } while (0)
6055#else
6056# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6057 do { \
6058 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6059 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6060 } while (0)
6061
6062# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6063 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6064
6065# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6066#endif
6067
6068#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6069/** Macro that does the necessary privilege checks and intercepted VM-exits for
6070 * guests that attempted to execute a VMX instruction. */
6071# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6072 do \
6073 { \
6074 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6075 if (rcStrictTmp == VINF_SUCCESS) \
6076 { /* likely */ } \
6077 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6078 { \
6079 Assert((a_pVCpu)->hm.s.Event.fPending); \
6080 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6081 return VINF_SUCCESS; \
6082 } \
6083 else \
6084 { \
6085 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6086 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6087 } \
6088 } while (0)
6089
6090/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6091# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6092 do \
6093 { \
6094 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6095 (a_pGCPtrEffAddr)); \
6096 if (rcStrictTmp == VINF_SUCCESS) \
6097 { /* likely */ } \
6098 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6099 { \
6100 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6101 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6102 NOREF(uXcptTmp); \
6103 return VINF_SUCCESS; \
6104 } \
6105 else \
6106 { \
6107 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6108 return rcStrictTmp; \
6109 } \
6110 } while (0)
6111#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
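/*
 * Editor's note: a rough, hypothetical skeleton (not built) showing the intended call
 * order of the two macros above inside a VMX-instruction exit handler. The real
 * handlers elsewhere in this file differ in which transient fields they read and how
 * they hand the decoded operand to IEM; uInstrInfo/uQual below are mere placeholders.
 */
#if 0
static VBOXSTRICTRC vmxHCExitVmxInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Common exit-handler parameter validation. */
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* Privilege/mode checks; returns VINF_SUCCESS early if an exception is now pending. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* Decode the memory operand (placeholder inputs; the real handlers use the VMCS-read
       instruction-information and exit-qualification fields). */
    uint32_t const uInstrInfo = 0;  /* placeholder */
    uint64_t const uQual      = 0;  /* placeholder */
    RTGCPTR        GCPtrEffAddr;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, uInstrInfo, uQual, VMXMEMACCESS_READ, &GCPtrEffAddr);

    /* The decoded address would then be passed on to IEM for full emulation. */
    RT_NOREF(GCPtrEffAddr);
    return VINF_SUCCESS;
}
#endif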
6112
6113
6114/**
6115 * Advances the guest RIP by the specified number of bytes.
6116 *
6117 * @param pVCpu The cross context virtual CPU structure.
6118 * @param cbInstr Number of bytes to advance the RIP by.
6119 *
6120 * @remarks No-long-jump zone!!!
6121 */
6122DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6123{
6124 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6125
6126 /*
6127 * Advance RIP.
6128 *
6129 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6130 * when the addition causes a "carry" into the upper half and check whether
6131 * we're in 64-bit mode and can go on with it or whether we should zap the top
6132 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6133 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6134 *
6135 * See PC wrap around tests in bs3-cpu-weird-1.
6136 */
6137 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6138 uint64_t const uRipNext = uRipPrev + cbInstr;
6139 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6140 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6141 pVCpu->cpum.GstCtx.rip = uRipNext;
6142 else
6143 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6144
6145 /*
6146 * Clear RF and interrupt shadowing.
6147 */
6148 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6149 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6150 else
6151 {
6152 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6153 {
6154 /** @todo \#DB - single step. */
6155 }
6156 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6157 }
6158 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6159
6160 /* Mark both RIP and RFLAGS as updated. */
6161 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6162}
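/*
 * Editor's note: a minimal standalone illustration (not built) of the bit-32 carry
 * check used by vmxHCAdvanceGuestRipBy() above. Outside 64-bit mode the architectural
 * instruction pointer is 32 bits wide, so an addition that carries into bit 32 must be
 * truncated back to the lower half.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>
static uint64_t exampleAdvanceRip(uint64_t uRipPrev, uint32_t cbInstr, bool fIn64BitCode)
{
    uint64_t const uRipNext = uRipPrev + cbInstr;
    /* Bit 32 of (old ^ new) is set exactly when the addition carried into the upper half. */
    if (   !((uRipNext ^ uRipPrev) & UINT64_C(0x100000000))
        || fIn64BitCode)
        return uRipNext;            /* No carry, or genuine 64-bit code: keep all 64 bits. */
    return (uint32_t)uRipNext;      /* 16/32-bit code: zap the upper half. */
}
/* E.g. exampleAdvanceRip(0xfffffffe, 5, false) yields 0x3 rather than 0x100000003. */
#endif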
6163
6164
6165/**
6166 * Advances the guest RIP after reading it from the VMCS.
6167 *
6168 * @returns VBox status code, no informational status codes.
6169 * @param pVCpu The cross context virtual CPU structure.
6170 * @param pVmxTransient The VMX-transient structure.
6171 *
6172 * @remarks No-long-jump zone!!!
6173 */
6174static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6175{
6176 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6177 /** @todo consider template here after checking callers. */
6178 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6179 AssertRCReturn(rc, rc);
6180
6181 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6182 return VINF_SUCCESS;
6183}
6184
6185
6186/**
6187 * Handle a condition that occurred while delivering an event through the guest or
6188 * nested-guest IDT.
6189 *
6190 * @returns Strict VBox status code (i.e. informational status codes too).
6191 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6192 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6193 * to continue execution of the guest which will deliver the \#DF.
6194 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6195 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6196 *
6197 * @param pVCpu The cross context virtual CPU structure.
6198 * @param pVmxTransient The VMX-transient structure.
6199 *
6200 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6201 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6202 * is due to an EPT violation, PML full or SPP-related event.
6203 *
6204 * @remarks No-long-jump zone!!!
6205 */
6206static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6207{
6208 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6209 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6210 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6211 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6212 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6213 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6214
6215 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6216 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6217 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6218 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6219 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6220 {
6221 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6222 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6223
6224 /*
6225 * If the event was a software interrupt (generated with INT n) or a software exception
6226 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6227 * can handle the VM-exit and continue guest execution which will re-execute the
6228 * instruction rather than re-injecting the exception, as that can cause premature
6229 * trips to ring-3 before injection and involve TRPM which currently has no way of
6230 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6231 * the problem).
6232 */
6233 IEMXCPTRAISE enmRaise;
6234 IEMXCPTRAISEINFO fRaiseInfo;
6235 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6236 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6237 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6238 {
6239 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6240 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6241 }
6242 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6243 {
6244 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6245 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6246 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6247
6248 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6249 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6250
6251 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6252
6253 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6254 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6255 {
6256 pVmxTransient->fVectoringPF = true;
6257 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6258 }
6259 }
6260 else
6261 {
6262 /*
6263 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6264 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6265 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6266 */
6267 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6268 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6269 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6270 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6271 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6272 }
6273
6274 /*
6275 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6276 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6277 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6278 * subsequent VM-entry would fail, see @bugref{7445}.
6279 *
6280 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6281 */
6282 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6283 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6284 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6285 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6286 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6287
6288 switch (enmRaise)
6289 {
6290 case IEMXCPTRAISE_CURRENT_XCPT:
6291 {
6292 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6293 Assert(rcStrict == VINF_SUCCESS);
6294 break;
6295 }
6296
6297 case IEMXCPTRAISE_PREV_EVENT:
6298 {
6299 uint32_t u32ErrCode;
6300 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6301 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6302 else
6303 u32ErrCode = 0;
6304
6305 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6306 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6307 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6308 pVCpu->cpum.GstCtx.cr2);
6309
6310 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6311 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6312 Assert(rcStrict == VINF_SUCCESS);
6313 break;
6314 }
6315
6316 case IEMXCPTRAISE_REEXEC_INSTR:
6317 Assert(rcStrict == VINF_SUCCESS);
6318 break;
6319
6320 case IEMXCPTRAISE_DOUBLE_FAULT:
6321 {
6322 /*
6323 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6324 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6325 */
6326 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6327 {
6328 pVmxTransient->fVectoringDoublePF = true;
6329 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6330 pVCpu->cpum.GstCtx.cr2));
6331 rcStrict = VINF_SUCCESS;
6332 }
6333 else
6334 {
6335 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6336 vmxHCSetPendingXcptDF(pVCpu);
6337 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6338 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6339 rcStrict = VINF_HM_DOUBLE_FAULT;
6340 }
6341 break;
6342 }
6343
6344 case IEMXCPTRAISE_TRIPLE_FAULT:
6345 {
6346 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6347 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6348 rcStrict = VINF_EM_RESET;
6349 break;
6350 }
6351
6352 case IEMXCPTRAISE_CPU_HANG:
6353 {
6354 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6355 rcStrict = VERR_EM_GUEST_CPU_HANG;
6356 break;
6357 }
6358
6359 default:
6360 {
6361 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6362 rcStrict = VERR_VMX_IPE_2;
6363 break;
6364 }
6365 }
6366 }
6367 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6368 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6369 {
6370 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6371 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6372 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6373 {
6374 /*
6375 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
6376 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6377 * that virtual NMIs remain blocked until the IRET execution is completed.
6378 *
6379 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6380 */
6381 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6382 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6383 }
6384 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6385 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6386 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6387 {
6388 /*
6389 * Execution of IRET caused an EPT violation, page-modification log-full event or
6390 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6391 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6392 * that virtual NMIs remain blocked until the IRET execution is completed.
6393 *
6394 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6395 */
6396 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6397 {
6398 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6399 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6400 }
6401 }
6402 }
6403
6404 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6405 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6406 return rcStrict;
6407}
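/*
 * Editor's note: a rough sketch (not built) of how an exit handler is expected to
 * consume the status codes documented above. Anything other than VINF_SUCCESS
 * short-circuits normal VM-exit handling so the pending #DF, triple-fault reset or
 * guest-CPU-hang condition gets processed instead.
 */
#if 0
static VBOXSTRICTRC exampleHandleExitWithIdtVectoring(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* VINF_HM_DOUBLE_FAULT, VINF_EM_RESET or VERR_EM_GUEST_CPU_HANG. */

    /* ... normal handling of the VM-exit would continue here ... */
    return VINF_SUCCESS;
}
#endif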
6408
6409
6410#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6411/**
6412 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6413 * guest attempting to execute a VMX instruction.
6414 *
6415 * @returns Strict VBox status code (i.e. informational status codes too).
6416 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6417 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6418 *
6419 * @param pVCpu The cross context virtual CPU structure.
6420 * @param uExitReason The VM-exit reason.
6421 *
6422 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6423 * @remarks No-long-jump zone!!!
6424 */
6425static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6426{
6427 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6428 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6429
6430 /*
6431 * The physical CPU would have already checked the CPU mode/code segment.
6432 * We shall just assert here for paranoia.
6433 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6434 */
6435 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6436 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6437 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6438
6439 if (uExitReason == VMX_EXIT_VMXON)
6440 {
6441 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6442
6443 /*
6444 * We check CR4.VMXE because it is required to be always set while in VMX operation
6445 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6446 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6447 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6448 */
6449 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6450 {
6451 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6452 vmxHCSetPendingXcptUD(pVCpu);
6453 return VINF_HM_PENDING_XCPT;
6454 }
6455 }
6456 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6457 {
6458 /*
6459 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6460 * (other than VMXON), we need to raise a #UD.
6461 */
6462 Log4Func(("Not in VMX root mode -> #UD\n"));
6463 vmxHCSetPendingXcptUD(pVCpu);
6464 return VINF_HM_PENDING_XCPT;
6465 }
6466
6467 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6468 return VINF_SUCCESS;
6469}
6470
6471
6472/**
6473 * Decodes the memory operand of an instruction that caused a VM-exit.
6474 *
6475 * The Exit qualification field provides the displacement field for memory
6476 * operand instructions, if any.
6477 *
6478 * @returns Strict VBox status code (i.e. informational status codes too).
6479 * @retval VINF_SUCCESS if the operand was successfully decoded.
6480 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6481 * operand.
6482 * @param pVCpu The cross context virtual CPU structure.
6483 * @param uExitInstrInfo The VM-exit instruction information field.
6484 * @param enmMemAccess The memory operand's access type (read or write).
6485 * @param GCPtrDisp The instruction displacement field, if any. For
6486 * RIP-relative addressing pass RIP + displacement here.
6487 * @param pGCPtrMem Where to store the effective destination memory address.
6488 *
6489 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6490 * virtual-8086 mode hence skips those checks while verifying if the
6491 * segment is valid.
6492 */
6493static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6494 PRTGCPTR pGCPtrMem)
6495{
6496 Assert(pGCPtrMem);
6497 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6498 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6499 | CPUMCTX_EXTRN_CR0);
6500
6501 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6502 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6503 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6504
6505 VMXEXITINSTRINFO ExitInstrInfo;
6506 ExitInstrInfo.u = uExitInstrInfo;
6507 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6508 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6509 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6510 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6511 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6512 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6513 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6514 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6515 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6516
6517 /*
6518 * Validate instruction information.
6519 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6520 */
6521 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6522 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6523 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6524 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6525 AssertLogRelMsgReturn(fIsMemOperand,
6526 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6527
6528 /*
6529 * Compute the complete effective address.
6530 *
6531 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6532 * See AMD spec. 4.5.2 "Segment Registers".
6533 */
6534 RTGCPTR GCPtrMem = GCPtrDisp;
6535 if (fBaseRegValid)
6536 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6537 if (fIdxRegValid)
6538 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6539
6540 RTGCPTR const GCPtrOff = GCPtrMem;
6541 if ( !fIsLongMode
6542 || iSegReg >= X86_SREG_FS)
6543 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6544 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6545
6546 /*
6547 * Validate effective address.
6548 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6549 */
6550 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6551 Assert(cbAccess > 0);
6552 if (fIsLongMode)
6553 {
6554 if (X86_IS_CANONICAL(GCPtrMem))
6555 {
6556 *pGCPtrMem = GCPtrMem;
6557 return VINF_SUCCESS;
6558 }
6559
6560 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6561 * "Data Limit Checks in 64-bit Mode". */
6562 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6563 vmxHCSetPendingXcptGP(pVCpu, 0);
6564 return VINF_HM_PENDING_XCPT;
6565 }
6566
6567 /*
6568 * This is a watered down version of iemMemApplySegment().
6569 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6570 * and segment CPL/DPL checks are skipped.
6571 */
6572 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6573 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6574 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6575
6576 /* Check if the segment is present and usable. */
6577 if ( pSel->Attr.n.u1Present
6578 && !pSel->Attr.n.u1Unusable)
6579 {
6580 Assert(pSel->Attr.n.u1DescType);
6581 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6582 {
6583 /* Check permissions for the data segment. */
6584 if ( enmMemAccess == VMXMEMACCESS_WRITE
6585 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6586 {
6587 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6588 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6589 return VINF_HM_PENDING_XCPT;
6590 }
6591
6592 /* Check limits if it's a normal data segment. */
6593 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6594 {
6595 if ( GCPtrFirst32 > pSel->u32Limit
6596 || GCPtrLast32 > pSel->u32Limit)
6597 {
6598 Log4Func(("Data segment limit exceeded. "
6599 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6600 GCPtrLast32, pSel->u32Limit));
6601 if (iSegReg == X86_SREG_SS)
6602 vmxHCSetPendingXcptSS(pVCpu, 0);
6603 else
6604 vmxHCSetPendingXcptGP(pVCpu, 0);
6605 return VINF_HM_PENDING_XCPT;
6606 }
6607 }
6608 else
6609 {
6610 /* Check limits if it's an expand-down data segment.
6611 Note! The upper boundary is defined by the B bit, not the G bit! */
6612 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6613 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6614 {
6615 Log4Func(("Expand-down data segment limit exceeded. "
6616 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6617 GCPtrLast32, pSel->u32Limit));
6618 if (iSegReg == X86_SREG_SS)
6619 vmxHCSetPendingXcptSS(pVCpu, 0);
6620 else
6621 vmxHCSetPendingXcptGP(pVCpu, 0);
6622 return VINF_HM_PENDING_XCPT;
6623 }
6624 }
6625 }
6626 else
6627 {
6628 /* Check permissions for the code segment. */
6629 if ( enmMemAccess == VMXMEMACCESS_WRITE
6630 || ( enmMemAccess == VMXMEMACCESS_READ
6631 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6632 {
6633 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6634 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6635 vmxHCSetPendingXcptGP(pVCpu, 0);
6636 return VINF_HM_PENDING_XCPT;
6637 }
6638
6639 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6640 if ( GCPtrFirst32 > pSel->u32Limit
6641 || GCPtrLast32 > pSel->u32Limit)
6642 {
6643 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6644 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6645 if (iSegReg == X86_SREG_SS)
6646 vmxHCSetPendingXcptSS(pVCpu, 0);
6647 else
6648 vmxHCSetPendingXcptGP(pVCpu, 0);
6649 return VINF_HM_PENDING_XCPT;
6650 }
6651 }
6652 }
6653 else
6654 {
6655 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6656 vmxHCSetPendingXcptGP(pVCpu, 0);
6657 return VINF_HM_PENDING_XCPT;
6658 }
6659
6660 *pGCPtrMem = GCPtrMem;
6661 return VINF_SUCCESS;
6662}
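/*
 * Editor's note: standalone illustration (not built) of the effective-address
 * arithmetic performed by vmxHCDecodeMemOperand() above:
 *     ea = (disp + base + (index << scale) [+ segment base]) & address-size mask
 * E.g. disp=0x10, base=0x1000, index=0x20, scale=2 with a 32-bit address size gives
 * 0x10 + 0x1000 + (0x20 << 2) = 0x1090, masked with 0xffffffff.
 */
#if 0
# include <stdint.h>
static uint64_t exampleVmxEffAddr(uint64_t uDisp, uint64_t uBase, uint64_t uIndex,
                                  unsigned uScale, uint64_t uSegBase, unsigned iAddrSize)
{
    static uint64_t const s_auMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    uint64_t uEffAddr = uDisp + uBase + (uIndex << uScale);    /* SIB-style components. */
    uEffAddr += uSegBase;                                      /* Added for all segments outside long mode, and for FS/GS even in long mode. */
    return uEffAddr & s_auMasks[iAddrSize];                    /* 0=16-bit, 1=32-bit, 2=64-bit. */
}
#endif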
6663#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6664
6665
6666/**
6667 * VM-exit helper for LMSW.
6668 */
6669static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6670{
6671 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6672 AssertRCReturn(rc, rc);
6673
6674 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6675 AssertMsg( rcStrict == VINF_SUCCESS
6676 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6677
6678 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6679 if (rcStrict == VINF_IEM_RAISED_XCPT)
6680 {
6681 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6682 rcStrict = VINF_SUCCESS;
6683 }
6684
6685 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6686 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6687 return rcStrict;
6688}
6689
6690
6691/**
6692 * VM-exit helper for CLTS.
6693 */
6694static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6695{
6696 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6697 AssertRCReturn(rc, rc);
6698
6699 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6700 AssertMsg( rcStrict == VINF_SUCCESS
6701 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6702
6703 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6704 if (rcStrict == VINF_IEM_RAISED_XCPT)
6705 {
6706 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6707 rcStrict = VINF_SUCCESS;
6708 }
6709
6710 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6711 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6712 return rcStrict;
6713}
6714
6715
6716/**
6717 * VM-exit helper for MOV from CRx (CRx read).
6718 */
6719static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6720{
6721 Assert(iCrReg < 16);
6722 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6723
6724 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6725 AssertRCReturn(rc, rc);
6726
6727 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6728 AssertMsg( rcStrict == VINF_SUCCESS
6729 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6730
6731 if (iGReg == X86_GREG_xSP)
6732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6733 else
6734 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6735#ifdef VBOX_WITH_STATISTICS
6736 switch (iCrReg)
6737 {
6738 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6739 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6740 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6741 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6742 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6743 }
6744#endif
6745 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6746 return rcStrict;
6747}
6748
6749
6750/**
6751 * VM-exit helper for MOV to CRx (CRx write).
6752 */
6753static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6754{
6755 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6756
6757 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6758 AssertMsg( rcStrict == VINF_SUCCESS
6759 || rcStrict == VINF_IEM_RAISED_XCPT
6760 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6761
6762 switch (iCrReg)
6763 {
6764 case 0:
6765 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6766 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6767 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6768 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6769 break;
6770
6771 case 2:
6772 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6773 /* Nothing to do here, CR2 it's not part of the VMCS. */
6774 break;
6775
6776 case 3:
6777 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6778 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6779 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6780 break;
6781
6782 case 4:
6783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6784 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6785#ifndef IN_NEM_DARWIN
6786 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6787 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6788#else
6789 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6790#endif
6791 break;
6792
6793 case 8:
6794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6795 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6796 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6797 break;
6798
6799 default:
6800 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6801 break;
6802 }
6803
6804 if (rcStrict == VINF_IEM_RAISED_XCPT)
6805 {
6806 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6807 rcStrict = VINF_SUCCESS;
6808 }
6809 return rcStrict;
6810}
6811
6812
6813/**
6814 * VM-exit exception handler for \#PF (Page-fault exception).
6815 *
6816 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6817 */
6818static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6819{
6820 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6821 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6822
6823#ifndef IN_NEM_DARWIN
6824 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6825 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6826 { /* likely */ }
6827 else
6828#endif
6829 {
6830#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6831 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6832#endif
6833 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6834 if (!pVmxTransient->fVectoringDoublePF)
6835 {
6836 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6837 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6838 }
6839 else
6840 {
6841 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6842 Assert(!pVmxTransient->fIsNestedGuest);
6843 vmxHCSetPendingXcptDF(pVCpu);
6844 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6845 }
6846 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6847 return VINF_SUCCESS;
6848 }
6849
6850 Assert(!pVmxTransient->fIsNestedGuest);
6851
6852 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6853 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6854 if (pVmxTransient->fVectoringPF)
6855 {
6856 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6857 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6858 }
6859
6860 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6861 AssertRCReturn(rc, rc);
6862
6863 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6864 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6865
6866 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6867 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6868
6869 Log4Func(("#PF: rc=%Rrc\n", rc));
6870 if (rc == VINF_SUCCESS)
6871 {
6872 /*
6873 * This is typically a shadow page table sync or an MMIO instruction. But we may have
6874 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6875 */
6876 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6877 TRPMResetTrap(pVCpu);
6878 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6879 return rc;
6880 }
6881
6882 if (rc == VINF_EM_RAW_GUEST_TRAP)
6883 {
6884 if (!pVmxTransient->fVectoringDoublePF)
6885 {
6886 /* It's a guest page fault and needs to be reflected to the guest. */
6887 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6888 TRPMResetTrap(pVCpu);
6889 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6890 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6891 uGstErrorCode, pVmxTransient->uExitQual);
6892 }
6893 else
6894 {
6895 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6896 TRPMResetTrap(pVCpu);
6897 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6898 vmxHCSetPendingXcptDF(pVCpu);
6899 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6900 }
6901
6902 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6903 return VINF_SUCCESS;
6904 }
6905
6906 TRPMResetTrap(pVCpu);
6907 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6908 return rc;
6909}
6910
6911
6912/**
6913 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6914 *
6915 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6916 */
6917static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6918{
6919 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6920 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6921
6922 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6923 AssertRCReturn(rc, rc);
6924
6925 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6926 {
6927 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6928 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6929
6930 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6931 * provides VM-exit instruction length. If this causes a problem later,
6932 * disassemble the instruction like it's done on AMD-V. */
6933 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6934 AssertRCReturn(rc2, rc2);
6935 return rc;
6936 }
6937
6938 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6939 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6940 return VINF_SUCCESS;
6941}
6942
6943
6944/**
6945 * VM-exit exception handler for \#BP (Breakpoint exception).
6946 *
6947 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6948 */
6949static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6950{
6951 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6952 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6953
6954 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6955 AssertRCReturn(rc, rc);
6956
6957 VBOXSTRICTRC rcStrict;
6958 if (!pVmxTransient->fIsNestedGuest)
6959 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
6960 else
6961 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6962
6963 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6964 {
6965 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6966 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6967 rcStrict = VINF_SUCCESS;
6968 }
6969
6970 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6971 return rcStrict;
6972}
6973
6974
6975/**
6976 * VM-exit exception handler for \#AC (Alignment-check exception).
6977 *
6978 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6979 */
6980static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6981{
6982 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6983
6984 /*
6985 * Detect #ACs caused by host having enabled split-lock detection.
6986 * Emulate such instructions.
6987 */
6988#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6989 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6990 AssertRCReturn(rc, rc);
6991 /** @todo detect split lock in cpu feature? */
6992 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6993 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6994 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6995 || CPUMGetGuestCPL(pVCpu) != 3
6996 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6997 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6998 {
6999 /*
7000 * Check for debug/trace events and import state accordingly.
7001 */
7002 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7003 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7004 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7005#ifndef IN_NEM_DARWIN
7006 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7007#endif
7008 )
7009 {
7010 if (pVM->cCpus == 1)
7011 {
7012#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7013 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7014 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7015#else
7016 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7017 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7018#endif
7019 AssertRCReturn(rc, rc);
7020 }
7021 }
7022 else
7023 {
7024 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7025 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7026 AssertRCReturn(rc, rc);
7027
7028 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7029
7030 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7031 {
7032 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7033 if (rcStrict != VINF_SUCCESS)
7034 return rcStrict;
7035 }
7036 }
7037
7038 /*
7039 * Emulate the instruction.
7040 *
7041 * We have to ignore the LOCK prefix here as we must not retrigger the
7042 * detection on the host. This isn't all that satisfactory, though...
7043 */
7044 if (pVM->cCpus == 1)
7045 {
7046 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7047 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7048
7049 /** @todo For SMP configs we should do a rendezvous here. */
7050 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7051 if (rcStrict == VINF_SUCCESS)
7052#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7053 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7054 HM_CHANGED_GUEST_RIP
7055 | HM_CHANGED_GUEST_RFLAGS
7056 | HM_CHANGED_GUEST_GPRS_MASK
7057 | HM_CHANGED_GUEST_CS
7058 | HM_CHANGED_GUEST_SS);
7059#else
7060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7061#endif
7062 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7063 {
7064 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7065 rcStrict = VINF_SUCCESS;
7066 }
7067 return rcStrict;
7068 }
7069 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7070 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7071 return VINF_EM_EMULATE_SPLIT_LOCK;
7072 }
7073
7074 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7075 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7076 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7077
7078 /* Re-inject it. We'll detect any nesting before getting here. */
7079 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7080 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7081 return VINF_SUCCESS;
7082}
7083
7084
7085/**
7086 * VM-exit exception handler for \#DB (Debug exception).
7087 *
7088 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7089 */
7090static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7091{
7092 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7093 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7094
7095 /*
7096 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7097 */
7098 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7099
7100    /* See Intel spec. Table 27-1 "Exit Qualifications for Debug Exceptions" for the format. */
7101 uint64_t const uDR6 = X86_DR6_INIT_VAL
7102 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7103 | X86_DR6_BD | X86_DR6_BS));
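    /* The B0-B3, BD and BS bits in the Exit qualification occupy the same positions as in DR6,
       so they can be OR'ed directly onto the DR6 init value. */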
7104 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7105
7106 int rc;
7107 if (!pVmxTransient->fIsNestedGuest)
7108 {
7109 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7110
7111 /*
7112 * Prevents stepping twice over the same instruction when the guest is stepping using
7113 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7114 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7115 */
7116 if ( rc == VINF_EM_DBG_STEPPED
7117 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7118 {
7119 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7120 rc = VINF_EM_RAW_GUEST_TRAP;
7121 }
7122 }
7123 else
7124 rc = VINF_EM_RAW_GUEST_TRAP;
7125 Log6Func(("rc=%Rrc\n", rc));
7126 if (rc == VINF_EM_RAW_GUEST_TRAP)
7127 {
7128 /*
7129 * The exception was for the guest. Update DR6, DR7.GD and
7130 * IA32_DEBUGCTL.LBR before forwarding it.
7131 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7132 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7133 */
7134#ifndef IN_NEM_DARWIN
7135 VMMRZCallRing3Disable(pVCpu);
7136 HM_DISABLE_PREEMPT(pVCpu);
7137
7138 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7139 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7140 if (CPUMIsGuestDebugStateActive(pVCpu))
7141 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7142
7143 HM_RESTORE_PREEMPT();
7144 VMMRZCallRing3Enable(pVCpu);
7145#else
7146 /** @todo */
7147#endif
7148
7149 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7150 AssertRCReturn(rc, rc);
7151
7152 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7153 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7154
7155 /* Paranoia. */
7156 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7157 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7158
7159 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7160 AssertRC(rc);
7161
7162 /*
7163 * Raise #DB in the guest.
7164 *
7165 * It is important to reflect exactly what the VM-exit gave us (preserving the
7166 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7167 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7168 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7169 *
7170     * Intel re-documented ICEBP/INT1 in May 2018; previously it was only documented as
7171     * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7172 */
7173 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7174 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7175 return VINF_SUCCESS;
7176 }
7177
7178 /*
7179 * Not a guest trap, must be a hypervisor related debug event then.
7180 * Update DR6 in case someone is interested in it.
7181 */
7182 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7183 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7184 CPUMSetHyperDR6(pVCpu, uDR6);
7185
7186 return rc;
7187}
7188
7189
7190/**
7191 * Hacks its way around the lovely mesa driver's backdoor accesses.
7192 *
7193 * @sa hmR0SvmHandleMesaDrvGp.
7194 */
7195static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7196{
7197 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7198 RT_NOREF(pCtx);
7199
7200 /* For now we'll just skip the instruction. */
7201 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7202}
7203
7204
7205/**
7206 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7207 * backdoor logging w/o checking what it is running inside.
7208 *
7209 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7210 * backdoor port and magic numbers loaded in registers.
7211 *
7212 * @returns true if it is, false if it isn't.
7213 * @sa hmR0SvmIsMesaDrvGp.
7214 */
7215DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7216{
7217 /* 0xed: IN eAX,dx */
7218 uint8_t abInstr[1];
7219 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7220 return false;
7221
7222 /* Check that it is #GP(0). */
7223 if (pVmxTransient->uExitIntErrorCode != 0)
7224 return false;
7225
7226 /* Check magic and port. */
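    /* 0x564d5868 is the VMware backdoor magic ('VMXh') and 0x5658 ('VX') is the backdoor I/O
       port which the Mesa svga/vmwgfx driver uses for its backdoor logging. */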
7227 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7228 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7229 if (pCtx->rax != UINT32_C(0x564d5868))
7230 return false;
7231 if (pCtx->dx != UINT32_C(0x5658))
7232 return false;
7233
7234 /* Flat ring-3 CS. */
7235 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7236 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7237 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7238 if (pCtx->cs.Attr.n.u2Dpl != 3)
7239 return false;
7240 if (pCtx->cs.u64Base != 0)
7241 return false;
7242
7243 /* Check opcode. */
7244 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7245 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7246 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7247 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7248 if (RT_FAILURE(rc))
7249 return false;
7250 if (abInstr[0] != 0xed)
7251 return false;
7252
7253 return true;
7254}
7255
7256
7257/**
7258 * VM-exit exception handler for \#GP (General-protection exception).
7259 *
7260 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7261 */
7262static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7263{
7264 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7265 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7266
7267 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7268 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7269#ifndef IN_NEM_DARWIN
7270 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7271 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7272 { /* likely */ }
7273 else
7274#endif
7275 {
7276#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7277# ifndef IN_NEM_DARWIN
7278 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7279# else
7280 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7281# endif
7282#endif
7283 /*
7284 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7285 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7286 */
7287 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7288 AssertRCReturn(rc, rc);
7289 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7290 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7291
7292 if ( pVmxTransient->fIsNestedGuest
7293 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7294 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7295 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7296 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7297 else
7298 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7299 return rc;
7300 }
7301
7302#ifndef IN_NEM_DARWIN
7303 Assert(CPUMIsGuestInRealModeEx(pCtx));
7304 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7305 Assert(!pVmxTransient->fIsNestedGuest);
7306
7307 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7308 AssertRCReturn(rc, rc);
7309
7310 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7311 if (rcStrict == VINF_SUCCESS)
7312 {
7313 if (!CPUMIsGuestInRealModeEx(pCtx))
7314 {
7315 /*
7316 * The guest is no longer in real-mode, check if we can continue executing the
7317 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7318 */
7319 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7320 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7321 {
7322 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7323 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7324 }
7325 else
7326 {
7327 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7328 rcStrict = VINF_EM_RESCHEDULE;
7329 }
7330 }
7331 else
7332 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7333 }
7334 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7335 {
7336 rcStrict = VINF_SUCCESS;
7337 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7338 }
7339 return VBOXSTRICTRC_VAL(rcStrict);
7340#endif
7341}
7342
7343
7344/**
7345 * VM-exit exception handler for \#DE (Divide Error).
7346 *
7347 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7348 */
7349static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7350{
7351 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7352 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7353
7354 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7355 AssertRCReturn(rc, rc);
7356
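    /* GCM (the Guest Compatibility Manager) can patch up known-problematic division sequences
       in certain guests; VERR_NOT_FOUND from GCMXcptDE means no fixup was applied and the #DE
       should be delivered to the guest. */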
7357 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7358 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7359 {
7360 uint8_t cbInstr = 0;
7361 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7362 if (rc2 == VINF_SUCCESS)
7363 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7364 else if (rc2 == VERR_NOT_FOUND)
7365 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7366 else
7367 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7368 }
7369 else
7370 rcStrict = VINF_SUCCESS; /* Do nothing. */
7371
7372 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7373 if (RT_FAILURE(rcStrict))
7374 {
7375 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7376 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7377 rcStrict = VINF_SUCCESS;
7378 }
7379
7380 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7381 return VBOXSTRICTRC_VAL(rcStrict);
7382}
7383
7384
7385/**
7386 * VM-exit exception handler wrapper for all other exceptions that are not handled
7387 * by a specific handler.
7388 *
7389 * This simply re-injects the exception back into the VM without any special
7390 * processing.
7391 *
7392 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7393 */
7394static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7395{
7396 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7397
7398#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7399# ifndef IN_NEM_DARWIN
7400 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7401 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7402 ("uVector=%#x u32XcptBitmap=%#X32\n",
7403 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7404 NOREF(pVmcsInfo);
7405# endif
7406#endif
7407
7408 /*
7409 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7410 * would have been handled while checking exits due to event delivery.
7411 */
7412 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7413
7414#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7415 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7416 AssertRCReturn(rc, rc);
7417 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7418#endif
7419
7420#ifdef VBOX_WITH_STATISTICS
7421 switch (uVector)
7422 {
7423 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7424 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7425 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7426 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7427 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7428 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7429 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7430 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7431 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7432 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7433 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7434 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7435 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7436 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7437 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7438 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7439 default:
7440 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7441 break;
7442 }
7443#endif
7444
7445 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7446 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7447 NOREF(uVector);
7448
7449 /* Re-inject the original exception into the guest. */
7450 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7451 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7452 return VINF_SUCCESS;
7453}
7454
7455
7456/**
7457 * VM-exit exception handler for all exceptions (except NMIs!).
7458 *
7459 * @remarks This may be called for both guests and nested-guests. Take care to not
7460 * make assumptions and avoid doing anything that is not relevant when
7461 * executing a nested-guest (e.g., Mesa driver hacks).
7462 */
7463static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7464{
7465 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7466
7467 /*
7468 * If this VM-exit occurred while delivering an event through the guest IDT, take
7469 * action based on the return code and additional hints (e.g. for page-faults)
7470 * that will be updated in the VMX transient structure.
7471 */
7472 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7473 if (rcStrict == VINF_SUCCESS)
7474 {
7475 /*
7476 * If an exception caused a VM-exit due to delivery of an event, the original
7477 * event may have to be re-injected into the guest. We shall reinject it and
7478 * continue guest execution. However, page-fault is a complicated case and
7479 * needs additional processing done in vmxHCExitXcptPF().
7480 */
7481 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7482 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7483 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7484 || uVector == X86_XCPT_PF)
7485 {
7486 switch (uVector)
7487 {
7488 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7489 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7490 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7491 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7492 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7493 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7494 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7495 default:
7496 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7497 }
7498 }
7499 /* else: inject pending event before resuming guest execution. */
7500 }
7501 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7502 {
7503 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7504 rcStrict = VINF_SUCCESS;
7505 }
7506
7507 return rcStrict;
7508}
7509/** @} */
7510
7511
7512/** @name VM-exit handlers.
7513 * @{
7514 */
7515/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7516/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7517/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7518
7519/**
7520 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7521 */
7522HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7523{
7524 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7525 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7526
7527#ifndef IN_NEM_DARWIN
7528 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7529 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7530 return VINF_SUCCESS;
7531 return VINF_EM_RAW_INTERRUPT;
7532#else
7533 return VINF_SUCCESS;
7534#endif
7535}
7536
7537
7538/**
7539 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7540 * VM-exit.
7541 */
7542HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7543{
7544 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7545 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7546
7547 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7548
7549 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7550 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7551 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7552
7553 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7554 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7555 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7556 NOREF(pVmcsInfo);
7557
7558 VBOXSTRICTRC rcStrict;
7559 switch (uExitIntType)
7560 {
7561#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7562 /*
7563 * Host physical NMIs:
7564 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7565 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7566 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7567 *
7568 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7569 * See Intel spec. 27.5.5 "Updating Non-Register State".
7570 */
7571 case VMX_EXIT_INT_INFO_TYPE_NMI:
7572 {
7573 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7574 break;
7575 }
7576#endif
7577
7578 /*
7579 * Privileged software exceptions (#DB from ICEBP),
7580 * Software exceptions (#BP and #OF),
7581 * Hardware exceptions:
7582 * Process the required exceptions and resume guest execution if possible.
7583 */
7584 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7585 Assert(uVector == X86_XCPT_DB);
7586 RT_FALL_THRU();
7587 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7588 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7589 RT_FALL_THRU();
7590 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7591 {
7592 NOREF(uVector);
7593 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7594 | HMVMX_READ_EXIT_INSTR_LEN
7595 | HMVMX_READ_IDT_VECTORING_INFO
7596 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7597 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7598 break;
7599 }
7600
7601 default:
7602 {
7603 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7604 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7605 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7606 break;
7607 }
7608 }
7609
7610 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7611 return rcStrict;
7612}
7613
7614
7615/**
7616 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7617 */
7618HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7619{
7620 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7621
7622 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
7623 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7624 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7625
7626 /* Evaluate and deliver pending events and resume guest execution. */
7627 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7628 return VINF_SUCCESS;
7629}
7630
7631
7632/**
7633 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7634 */
7635HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7636{
7637 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7638
7639 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7640 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7641 {
7642 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7643 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7644 }
7645
7646 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7647
7648 /*
7649 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7650 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7651 */
7652 uint32_t fIntrState;
7653 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7654 AssertRC(rc);
7655 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7656 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7657 {
7658 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7659
7660 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7661 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7662 AssertRC(rc);
7663 }
7664
7665 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
7666 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7667
7668 /* Evaluate and deliver pending events and resume guest execution. */
7669 return VINF_SUCCESS;
7670}
7671
7672
7673/**
7674 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7675 */
7676HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7677{
7678 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7679 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7680}
7681
7682
7683/**
7684 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7685 */
7686HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7687{
7688 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7689 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7690}
7691
7692
7693/**
7694 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7695 */
7696HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7697{
7698 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7699
7700 /*
7701 * Get the state we need and update the exit history entry.
7702 */
7703 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7704 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7705 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7706 AssertRCReturn(rc, rc);
7707
7708 VBOXSTRICTRC rcStrict;
7709 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7710 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7711 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7712 if (!pExitRec)
7713 {
7714 /*
7715 * Regular CPUID instruction execution.
7716 */
7717 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7718 if (rcStrict == VINF_SUCCESS)
7719 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7720 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7721 {
7722 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7723 rcStrict = VINF_SUCCESS;
7724 }
7725 }
7726 else
7727 {
7728 /*
7729 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7730 */
7731 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7732 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7733 AssertRCReturn(rc2, rc2);
7734
7735 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7736 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7737
7738 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7739 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7740
7741 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7742 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7743 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7744 }
7745 return rcStrict;
7746}
7747
7748
7749/**
7750 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7751 */
7752HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7753{
7754 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7755
7756 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7757 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7758 AssertRCReturn(rc, rc);
7759
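    /* GETSEC raises #UD when CR4.SMXE is clear, so a GETSEC VM-exit can only legitimately
       occur with CR4.SMXE set; anything else indicates a VMCS or CPU problem. */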
7760 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7761 return VINF_EM_RAW_EMULATE_INSTR;
7762
7763 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7764 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7765}
7766
7767
7768/**
7769 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7770 */
7771HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7772{
7773 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7774
7775 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7776 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7777 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7778 AssertRCReturn(rc, rc);
7779
7780 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7781 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7782 {
7783 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7784 we must reset offsetting on VM-entry. See @bugref{6634}. */
7785 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7786 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7788 }
7789 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7790 {
7791 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7792 rcStrict = VINF_SUCCESS;
7793 }
7794 return rcStrict;
7795}
7796
7797
7798/**
7799 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7800 */
7801HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7802{
7803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7804
7805 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7806 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7807 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7808 AssertRCReturn(rc, rc);
7809
7810 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7811 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7812 {
7813 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7814 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7815 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7816 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7818 }
7819 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7820 {
7821 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7822 rcStrict = VINF_SUCCESS;
7823 }
7824 return rcStrict;
7825}
7826
7827
7828/**
7829 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7830 */
7831HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7832{
7833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7834
7835 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7836 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7837 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7838 AssertRCReturn(rc, rc);
7839
7840 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7841 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7842 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7843 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7844 {
7845 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7846 rcStrict = VINF_SUCCESS;
7847 }
7848 return rcStrict;
7849}
7850
7851
7852/**
7853 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7854 */
7855HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7856{
7857 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7858
7859 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7860 if (EMAreHypercallInstructionsEnabled(pVCpu))
7861 {
7862 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7863 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7864 | CPUMCTX_EXTRN_RFLAGS
7865 | CPUMCTX_EXTRN_CR0
7866 | CPUMCTX_EXTRN_SS
7867 | CPUMCTX_EXTRN_CS
7868 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7869 AssertRCReturn(rc, rc);
7870
7871 /* Perform the hypercall. */
7872 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7873 if (rcStrict == VINF_SUCCESS)
7874 {
7875 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7876 AssertRCReturn(rc, rc);
7877 }
7878 else
7879 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7880 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7881 || RT_FAILURE(rcStrict));
7882
7883 /* If the hypercall changes anything other than guest's general-purpose registers,
7884 we would need to reload the guest changed bits here before VM-entry. */
7885 }
7886 else
7887 Log4Func(("Hypercalls not enabled\n"));
7888
7889 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7890 if (RT_FAILURE(rcStrict))
7891 {
7892 vmxHCSetPendingXcptUD(pVCpu);
7893 rcStrict = VINF_SUCCESS;
7894 }
7895
7896 return rcStrict;
7897}
7898
7899
7900/**
7901 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7902 */
7903HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7904{
7905 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7906#ifndef IN_NEM_DARWIN
7907 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7908#endif
7909
7910 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7911 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7912 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7913 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7914 AssertRCReturn(rc, rc);
7915
7916 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7917
7918 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7919 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7920 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7921 {
7922 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7923 rcStrict = VINF_SUCCESS;
7924 }
7925 else
7926 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7927 VBOXSTRICTRC_VAL(rcStrict)));
7928 return rcStrict;
7929}
7930
7931
7932/**
7933 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7934 */
7935HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7936{
7937 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7938
7939 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7940 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7941 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7942 AssertRCReturn(rc, rc);
7943
7944 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7945 if (rcStrict == VINF_SUCCESS)
7946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7947 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7948 {
7949 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7950 rcStrict = VINF_SUCCESS;
7951 }
7952
7953 return rcStrict;
7954}
7955
7956
7957/**
7958 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7959 */
7960HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7961{
7962 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7963
7964 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7965 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7966 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7967 AssertRCReturn(rc, rc);
7968
7969 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7970 if (RT_SUCCESS(rcStrict))
7971 {
7972 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7973 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7974 rcStrict = VINF_SUCCESS;
7975 }
7976
7977 return rcStrict;
7978}
7979
7980
7981/**
7982 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7983 * VM-exit.
7984 */
7985HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7986{
7987 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7988 return VINF_EM_RESET;
7989}
7990
7991
7992/**
7993 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7994 */
7995HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7996{
7997 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7998
7999 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8000 AssertRCReturn(rc, rc);
8001
8002 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8003 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8004 rc = VINF_SUCCESS;
8005 else
8006 rc = VINF_EM_HALT;
8007
8008 if (rc != VINF_SUCCESS)
8009 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8010 return rc;
8011}
8012
8013
8014#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8015/**
8016 * VM-exit handler for instructions that result in a \#UD exception delivered to
8017 * the guest.
8018 */
8019HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8020{
8021 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8022 vmxHCSetPendingXcptUD(pVCpu);
8023 return VINF_SUCCESS;
8024}
8025#endif
8026
8027
8028/**
8029 * VM-exit handler for expiry of the VMX-preemption timer.
8030 */
8031HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8032{
8033 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8034
8035 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8036 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8037    Log12(("vmxHCExitPreemptTimer:\n"));
8038
8039 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8040 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8041 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8042 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8043 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8044}
8045
8046
8047/**
8048 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8049 */
8050HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8051{
8052 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8053
8054 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8055 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8056 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8057 AssertRCReturn(rc, rc);
8058
8059 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8061 : HM_CHANGED_RAISED_XCPT_MASK);
8062
8063#ifndef IN_NEM_DARWIN
8064 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8065 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
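    /* If the guest XCR0 now differs from the host's, the start-VM worker must load/save XCR0
       around guest execution; re-select the worker whenever this requirement changes. */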
8066 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8067 {
8068 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8069 hmR0VmxUpdateStartVmFunction(pVCpu);
8070 }
8071#endif
8072
8073 return rcStrict;
8074}
8075
8076
8077/**
8078 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8079 */
8080HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8081{
8082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8083
8084    /** @todo Enable the new code after finding a reliable guest test-case. */
8085#if 1
8086 return VERR_EM_INTERPRETER;
8087#else
8088 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8089 | HMVMX_READ_EXIT_INSTR_INFO
8090 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8091 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8092 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8093 AssertRCReturn(rc, rc);
8094
8095 /* Paranoia. Ensure this has a memory operand. */
8096 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8097
8098 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8099 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8100 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8101 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8102
8103 RTGCPTR GCPtrDesc;
8104 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8105
8106 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8107 GCPtrDesc, uType);
8108 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8110 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8111 {
8112 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8113 rcStrict = VINF_SUCCESS;
8114 }
8115 return rcStrict;
8116#endif
8117}
8118
8119
8120/**
8121 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8122 * VM-exit.
8123 */
8124HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8125{
8126 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8127 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8128 AssertRCReturn(rc, rc);
8129
8130 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8131 if (RT_FAILURE(rc))
8132 return rc;
8133
8134 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8135 NOREF(uInvalidReason);
8136
8137#ifdef VBOX_STRICT
8138 uint32_t fIntrState;
8139 uint64_t u64Val;
8140 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8141 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8142 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8143
8144 Log4(("uInvalidReason %u\n", uInvalidReason));
8145 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8146 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8147 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8148
8149 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8150 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8151 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8152 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8153 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8154 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8155 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8156    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8157 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8158 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8159 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8160 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8161# ifndef IN_NEM_DARWIN
8162 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8163 {
8164 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8165 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8166 }
8167
8168 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8169# endif
8170#endif
8171
8172 return VERR_VMX_INVALID_GUEST_STATE;
8173}
8174
8175/**
8176 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8177 */
8178HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8179{
8180 /*
8181 * Cumulative notes of all recognized but unexpected VM-exits.
8182 *
8183 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8184 * nested-paging is used.
8185 *
8186     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8187 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8188 * this function (and thereby stop VM execution) for handling such instructions.
8189 *
8190 *
8191 * VMX_EXIT_INIT_SIGNAL:
8192 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8193 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8194     * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8195 *
8196     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8197     * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8198     * See Intel spec. 23.8 "Restrictions on VMX Operation".
8199 *
8200 * VMX_EXIT_SIPI:
8201 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8202 * activity state is used. We don't make use of it as our guests don't have direct
8203 * access to the host local APIC.
8204 *
8205 * See Intel spec. 25.3 "Other Causes of VM-exits".
8206 *
8207 * VMX_EXIT_IO_SMI:
8208 * VMX_EXIT_SMI:
8209 * This can only happen if we support dual-monitor treatment of SMI, which can be
8210 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8211 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8212 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8213 *
8214 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8215 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8216 *
8217 * VMX_EXIT_ERR_MSR_LOAD:
8218     * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8219     * and typically indicate a bug in the hypervisor code. We thus cannot resume
8220     * execution.
8221 *
8222 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8223 *
8224 * VMX_EXIT_ERR_MACHINE_CHECK:
8225     * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8226     * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8227     * abort-class #MC exception is raised. We thus cannot assume a
8228     * reasonable chance of continuing any sort of execution and we bail.
8229 *
8230 * See Intel spec. 15.1 "Machine-check Architecture".
8231 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8232 *
8233 * VMX_EXIT_PML_FULL:
8234 * VMX_EXIT_VIRTUALIZED_EOI:
8235 * VMX_EXIT_APIC_WRITE:
8236 * We do not currently support any of these features and thus they are all unexpected
8237 * VM-exits.
8238 *
8239 * VMX_EXIT_GDTR_IDTR_ACCESS:
8240 * VMX_EXIT_LDTR_TR_ACCESS:
8241 * VMX_EXIT_RDRAND:
8242 * VMX_EXIT_RSM:
8243 * VMX_EXIT_VMFUNC:
8244 * VMX_EXIT_ENCLS:
8245 * VMX_EXIT_RDSEED:
8246 * VMX_EXIT_XSAVES:
8247 * VMX_EXIT_XRSTORS:
8248 * VMX_EXIT_UMWAIT:
8249 * VMX_EXIT_TPAUSE:
8250 * VMX_EXIT_LOADIWKEY:
8251 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8252     * instruction. Any VM-exit for these instructions indicates a hardware problem,
8253 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8254 *
8255 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8256 */
8257 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8258 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8259 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8260}
8261
8262
8263/**
8264 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8265 */
8266HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8267{
8268 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8269
8270 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8271
8272 /** @todo Optimize this: We currently drag in the whole MSR state
8273 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8274 * MSRs required. That would require changes to IEM and possibly CPUM too.
8275     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8276 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8277 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8278 int rc;
8279 switch (idMsr)
8280 {
8281 default:
8282 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8283 __FUNCTION__);
8284 AssertRCReturn(rc, rc);
8285 break;
8286 case MSR_K8_FS_BASE:
8287 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8288 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8289 AssertRCReturn(rc, rc);
8290 break;
8291 case MSR_K8_GS_BASE:
8292 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8293 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8294 AssertRCReturn(rc, rc);
8295 break;
8296 }
8297
8298 Log4Func(("ecx=%#RX32\n", idMsr));
8299
8300#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8301 Assert(!pVmxTransient->fIsNestedGuest);
8302 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8303 {
8304 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8305 && idMsr != MSR_K6_EFER)
8306 {
8307 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8308 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8309 }
8310 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8311 {
8312 Assert(pVmcsInfo->pvMsrBitmap);
8313 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8314 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8315 {
8316 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8317 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8318 }
8319 }
8320 }
8321#endif
8322
8323 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8324 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8325 if (rcStrict == VINF_SUCCESS)
8326 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8327 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8328 {
8329 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8330 rcStrict = VINF_SUCCESS;
8331 }
8332 else
8333 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8334 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8335
8336 return rcStrict;
8337}
8338
8339
8340/**
8341 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8342 */
8343HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8344{
8345 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8346
8347 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8348
8349 /*
8350 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8351     * Although we don't need to fetch the base (it will be overwritten shortly), when
8352     * loading the guest-state we also load the entire segment register, including its
8353     * limit and attributes, so we need to import them here.
8354 */
8355 /** @todo Optimize this: We currently drag in the whole MSR state
8356 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8357 * MSRs required. That would require changes to IEM and possibly CPUM too.
8358     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8359 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8360 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8361 int rc;
8362 switch (idMsr)
8363 {
8364 default:
8365 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8366 __FUNCTION__);
8367 AssertRCReturn(rc, rc);
8368 break;
8369
8370 case MSR_K8_FS_BASE:
8371 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8372 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8373 AssertRCReturn(rc, rc);
8374 break;
8375 case MSR_K8_GS_BASE:
8376 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8377 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8378 AssertRCReturn(rc, rc);
8379 break;
8380 }
8381 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8382
8383 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8384 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8385
8386 if (rcStrict == VINF_SUCCESS)
8387 {
8388 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8389
8390 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
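        /* MSR_IA32_APICBASE is 0x1b, while the x2APIC MSRs checked above live in the 0x800 range. */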
8391 if ( idMsr == MSR_IA32_APICBASE
8392 || ( idMsr >= MSR_IA32_X2APIC_START
8393 && idMsr <= MSR_IA32_X2APIC_END))
8394 {
8395 /*
8396 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8397 * When full APIC register virtualization is implemented we'll have to make
8398 * sure APIC state is saved from the VMCS before IEM changes it.
8399 */
8400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8401 }
8402 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8403 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8404 else if (idMsr == MSR_K6_EFER)
8405 {
8406 /*
8407 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8408 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8409 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8410 */
8411 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8412 }
8413
8414 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8415 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8416 {
8417 switch (idMsr)
8418 {
8419 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8420 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8421 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8422 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8423 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8424 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8425 default:
8426 {
8427#ifndef IN_NEM_DARWIN
8428 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8429 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8430 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8431 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8432#else
8433 AssertMsgFailed(("TODO\n"));
8434#endif
8435 break;
8436 }
8437 }
8438 }
8439#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8440 else
8441 {
8442 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8443 switch (idMsr)
8444 {
8445 case MSR_IA32_SYSENTER_CS:
8446 case MSR_IA32_SYSENTER_EIP:
8447 case MSR_IA32_SYSENTER_ESP:
8448 case MSR_K8_FS_BASE:
8449 case MSR_K8_GS_BASE:
8450 {
8451 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8452 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8453 }
8454
8455 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8456 default:
8457 {
8458 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8459 {
8460 /* EFER MSR writes are always intercepted. */
8461 if (idMsr != MSR_K6_EFER)
8462 {
8463 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8464 idMsr));
8465 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8466 }
8467 }
8468
8469 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8470 {
8471 Assert(pVmcsInfo->pvMsrBitmap);
8472 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8473 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8474 {
8475 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8476 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8477 }
8478 }
8479 break;
8480 }
8481 }
8482 }
8483#endif /* VBOX_STRICT */
8484 }
8485 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8486 {
8487 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8488 rcStrict = VINF_SUCCESS;
8489 }
8490 else
8491 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8492 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8493
8494 return rcStrict;
8495}
8496
8497
8498/**
8499 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8500 */
8501HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8502{
8503 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8504
8505 /** @todo The guest has likely hit a contended spinlock. We might want to
8506 * poke or schedule a different guest VCPU. */
8507 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8508 if (RT_SUCCESS(rc))
8509 return VINF_EM_RAW_INTERRUPT;
8510
8511 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8512 return rc;
8513}
8514
8515
8516/**
8517 * VM-exit handler for when the TPR value is lowered below the specified
8518 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8519 */
8520HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8521{
8522 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8523 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8524
8525 /*
8526 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8527 * We'll re-evaluate pending interrupts and inject them before the next VM
8528 * entry so we can just continue execution here.
8529 */
8530 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8531 return VINF_SUCCESS;
8532}
8533
8534
8535/**
8536 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8537 * VM-exit.
8538 *
8539 * @retval VINF_SUCCESS when guest execution can continue.
8540 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8541 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8542 * incompatible guest state for VMX execution (real-on-v86 case).
8543 */
8544HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8545{
8546 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8547 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8548
8549 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8550 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8551 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8552
8553 VBOXSTRICTRC rcStrict;
8554 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8555 uint64_t const uExitQual = pVmxTransient->uExitQual;
8556 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8557 switch (uAccessType)
8558 {
8559 /*
8560 * MOV to CRx.
8561 */
8562 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8563 {
8564 /*
8565 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8566 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8567 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8568 * PAE PDPTEs as well.
8569 */
8570 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8571 AssertRCReturn(rc, rc);
8572
8573 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8574#ifndef IN_NEM_DARWIN
8575 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8576#endif
8577 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8578 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8579
8580 /*
8581 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8582 * - When nested paging isn't used.
8583 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8584 * - We are executing in the VM debug loop.
8585 */
8586#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8587# ifndef IN_NEM_DARWIN
8588 Assert( iCrReg != 3
8589 || !VM_IS_VMX_NESTED_PAGING(pVM)
8590 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8591 || pVCpu->hmr0.s.fUsingDebugLoop);
8592# else
8593 Assert( iCrReg != 3
8594 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8595# endif
8596#endif
8597
8598 /* MOV to CR8 only causes a VM-exit when the TPR shadow is not used. */
8599 Assert( iCrReg != 8
8600 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8601
8602 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8603 AssertMsg( rcStrict == VINF_SUCCESS
8604 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8605
8606#ifndef IN_NEM_DARWIN
8607 /*
8608 * This is a kludge for handling switches back to real mode when we try to use
8609 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8610 * deal with special selector values, so we have to return to ring-3 and run
8611 * there till the selector values are V86 mode compatible.
8612 *
8613 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8614 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8615 * this function.
8616 */
8617 if ( iCrReg == 0
8618 && rcStrict == VINF_SUCCESS
8619 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8620 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8621 && (uOldCr0 & X86_CR0_PE)
8622 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8623 {
8624 /** @todo Check selectors rather than returning all the time. */
8625 Assert(!pVmxTransient->fIsNestedGuest);
8626 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8627 rcStrict = VINF_EM_RESCHEDULE_REM;
8628 }
8629#endif
8630
8631 break;
8632 }
8633
8634 /*
8635 * MOV from CRx.
8636 */
8637 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8638 {
8639 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8640 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8641
8642 /*
8643 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8644 * - When nested paging isn't used.
8645 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8646 * - We are executing in the VM debug loop.
8647 */
8648#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8649# ifndef IN_NEM_DARWIN
8650 Assert( iCrReg != 3
8651 || !VM_IS_VMX_NESTED_PAGING(pVM)
8652 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8653 || pVCpu->hmr0.s.fLeaveDone);
8654# else
8655 Assert( iCrReg != 3
8656 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8657# endif
8658#endif
8659
8660 /* MOV from CR8 only causes a VM-exit when the TPR shadow feature isn't enabled. */
8661 Assert( iCrReg != 8
8662 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8663
8664 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8665 break;
8666 }
8667
8668 /*
8669 * CLTS (Clear Task-Switch Flag in CR0).
8670 */
8671 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8672 {
8673 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8674 break;
8675 }
8676
8677 /*
8678 * LMSW (Load Machine-Status Word into CR0).
8679 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8680 */
8681 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8682 {
8683 RTGCPTR GCPtrEffDst;
8684 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8685 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8686 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8687 if (fMemOperand)
8688 {
8689 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8690 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8691 }
8692 else
8693 GCPtrEffDst = NIL_RTGCPTR;
8694 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8695 break;
8696 }
8697
8698 default:
8699 {
8700 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8701 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8702 }
8703 }
8704
8705 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8706 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8707 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8708
8709 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8710 NOREF(pVM);
8711 return rcStrict;
8712}
8713
8714
8715/**
8716 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8717 * VM-exit.
8718 */
8719HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8720{
8721 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8722 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8723
8724 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8725 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8726 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8727 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8728#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8729 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8730 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8731 AssertRCReturn(rc, rc);
8732
8733 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8734 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8735 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8736 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8737 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8738 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8739 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8740 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8741
8742 /*
8743 * Update exit history to see if this exit can be optimized.
8744 */
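    /* A non-NULL exit record from EMHistoryUpdateFlagsAndTypeAndPC below marks this as a frequent
       exit (or one flagged for probing); such exits are handled via EMHistoryExec in the else
       branch further down instead of the fast paths here. */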
8745 VBOXSTRICTRC rcStrict;
8746 PCEMEXITREC pExitRec = NULL;
8747 if ( !fGstStepping
8748 && !fDbgStepping)
8749 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8750 !fIOString
8751 ? !fIOWrite
8752 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8753 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8754 : !fIOWrite
8755 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8756 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8757 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8758 if (!pExitRec)
8759 {
8760 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8761 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
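    /* Both tables are indexed by the 2-bit I/O size encoding from the exit qualification
       (0 = byte, 1 = word, 3 = dword; the value 2 is invalid and asserted against above). */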
8762
8763 uint32_t const cbValue = s_aIOSizes[uIOSize];
8764 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8765 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8766 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8767 if (fIOString)
8768 {
8769 /*
8770 * INS/OUTS - I/O String instruction.
8771 *
8772 * Use instruction-information if available, otherwise fall back on
8773 * interpreting the instruction.
8774 */
8775 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8776 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8777 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
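    /* fInsOutsInfo: the CPU provides VM-exit instruction-information for INS/OUTS (a feature
       bit reported in the VMX basic MSR); without it we fall back to IEMExecOne() below. */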
8778 if (fInsOutsInfo)
8779 {
8780 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8781 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8782 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8783 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8784 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8785 if (fIOWrite)
8786 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8787 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8788 else
8789 {
8790 /*
8791 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8792 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8793 * See Intel Instruction spec. for "INS".
8794 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8795 */
8796 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8797 }
8798 }
8799 else
8800 rcStrict = IEMExecOne(pVCpu);
8801
8802 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8803 fUpdateRipAlready = true;
8804 }
8805 else
8806 {
8807 /*
8808 * IN/OUT - I/O instruction.
8809 */
8810 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8811 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8812 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8813 if (fIOWrite)
8814 {
8815 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8816 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8817#ifndef IN_NEM_DARWIN
8818 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8819 && !pCtx->eflags.Bits.u1TF)
8820 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8821#endif
8822 }
8823 else
8824 {
8825 uint32_t u32Result = 0;
8826 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8827 if (IOM_SUCCESS(rcStrict))
8828 {
8829 /* Save result of I/O IN instr. in AL/AX/EAX. */
8830 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8831 }
8832#ifndef IN_NEM_DARWIN
8833 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8834 && !pCtx->eflags.Bits.u1TF)
8835 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8836#endif
8837 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8838 }
8839 }
8840
8841 if (IOM_SUCCESS(rcStrict))
8842 {
8843 if (!fUpdateRipAlready)
8844 {
8845 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8846 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8847 }
8848
8849 /*
8850 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8851 * while booting a Fedora 17 64-bit guest.
8852 *
8853 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8854 */
8855 if (fIOString)
8856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8857
8858 /*
8859 * If any I/O breakpoints are armed, we need to check if one triggered
8860 * and take appropriate action.
8861 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8862 */
8863#if 1
8864 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8865#else
8866 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8867 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8868 AssertRCReturn(rc, rc);
8869#endif
8870
8871 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8872 * execution engines about whether hyper BPs and such are pending. */
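    /* An I/O breakpoint may be armed by the guest (DR7, only meaningful when CR4.DE is set) or
       by the hypervisor via DBGF; if either is the case, DBGFBpCheckIo below decides whether a
       #DB needs to be raised. */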
8873 uint32_t const uDr7 = pCtx->dr[7];
8874 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8875 && X86_DR7_ANY_RW_IO(uDr7)
8876 && (pCtx->cr4 & X86_CR4_DE))
8877 || DBGFBpIsHwIoArmed(pVM)))
8878 {
8879 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8880
8881#ifndef IN_NEM_DARWIN
8882 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8883 VMMRZCallRing3Disable(pVCpu);
8884 HM_DISABLE_PREEMPT(pVCpu);
8885
8886 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8887
8888 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8889 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8890 {
8891 /* Raise #DB. */
8892 if (fIsGuestDbgActive)
8893 ASMSetDR6(pCtx->dr[6]);
8894 if (pCtx->dr[7] != uDr7)
8895 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8896
8897 vmxHCSetPendingXcptDB(pVCpu);
8898 }
8899 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8900 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8901 else if ( rcStrict2 != VINF_SUCCESS
8902 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8903 rcStrict = rcStrict2;
8904 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8905
8906 HM_RESTORE_PREEMPT();
8907 VMMRZCallRing3Enable(pVCpu);
8908#else
8909 /** @todo */
8910#endif
8911 }
8912 }
8913
8914#ifdef VBOX_STRICT
8915 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8916 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8917 Assert(!fIOWrite);
8918 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8919 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8920 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8921 Assert(fIOWrite);
8922 else
8923 {
8924# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8925 * statuses, that the VMM device and some others may return. See
8926 * IOM_SUCCESS() for guidance. */
8927 AssertMsg( RT_FAILURE(rcStrict)
8928 || rcStrict == VINF_SUCCESS
8929 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8930 || rcStrict == VINF_EM_DBG_BREAKPOINT
8931 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8932 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8933# endif
8934 }
8935#endif
8936 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8937 }
8938 else
8939 {
8940 /*
8941 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8942 */
8943 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8944 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8945 AssertRCReturn(rc2, rc2);
8946 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8947 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8948 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8949 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8950 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8951 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8952
8953 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8954 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8955
8956 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8957 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8958 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8959 }
8960 return rcStrict;
8961}
8962
8963
8964/**
8965 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8966 * VM-exit.
8967 */
8968HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8969{
8970 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8971
8972 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8973 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8974 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8975 {
8976 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8977 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8978 {
8979 uint32_t uErrCode;
8980 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8981 {
8982 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8983 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8984 }
8985 else
8986 uErrCode = 0;
8987
8988 RTGCUINTPTR GCPtrFaultAddress;
8989 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8990 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8991 else
8992 GCPtrFaultAddress = 0;
8993
8994 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8995
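    /* Re-queue the original event (with its error code and fault address, if any) and let the
       caller hand it to TRPM for injection (VINF_EM_RAW_INJECT_TRPM_EVENT). */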
8996 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8997 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8998
8999 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9000 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9001 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9002 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9003 }
9004 }
9005
9006 /* Fall back to the interpreter to emulate the task-switch. */
9007 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9008 return VERR_EM_INTERPRETER;
9009}
9010
9011
9012/**
9013 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9014 */
9015HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9016{
9017 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9018
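    /* Disarm the monitor-trap-flag control in the VMCS and report that the single step completed. */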
9019 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9020 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9021 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9022 AssertRC(rc);
9023 return VINF_EM_DBG_STEPPED;
9024}
9025
9026
9027/**
9028 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9029 */
9030HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9031{
9032 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9033 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9034
9035 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9036 | HMVMX_READ_EXIT_INSTR_LEN
9037 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9038 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9039 | HMVMX_READ_IDT_VECTORING_INFO
9040 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9041
9042 /*
9043 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9044 */
9045 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9046 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9047 {
9048 /* If event delivery causes an APIC-access VM-exit (some guests do crazy things), go to instruction emulation. */
9049 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9050 {
9051 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9052 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9053 }
9054 }
9055 else
9056 {
9057 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9058 return rcStrict;
9059 }
9060
9061 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9062 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9063 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9064 AssertRCReturn(rc, rc);
9065
9066 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9067 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9068 switch (uAccessType)
9069 {
9070#ifndef IN_NEM_DARWIN
9071 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9072 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9073 {
9074 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9075 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9076 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9077
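    /* Reconstruct the physical address of the access: the APIC-base page plus the access offset
       reported in the exit qualification. */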
9078 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9079 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9080 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9081 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9082 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9083
9084 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9085 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9086 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9087 if ( rcStrict == VINF_SUCCESS
9088 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9089 || rcStrict == VERR_PAGE_NOT_PRESENT)
9090 {
9091 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9092 | HM_CHANGED_GUEST_APIC_TPR);
9093 rcStrict = VINF_SUCCESS;
9094 }
9095 break;
9096 }
9097#else
9098 /** @todo */
9099#endif
9100
9101 default:
9102 {
9103 Log4Func(("uAccessType=%#x\n", uAccessType));
9104 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9105 break;
9106 }
9107 }
9108
9109 if (rcStrict != VINF_SUCCESS)
9110 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9111 return rcStrict;
9112}
9113
9114
9115/**
9116 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9117 * VM-exit.
9118 */
9119HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9120{
9121 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9122 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9123
9124 /*
9125 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9126 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9127 * must emulate the MOV DRx access.
9128 */
9129 if (!pVmxTransient->fIsNestedGuest)
9130 {
9131 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9132 if ( pVmxTransient->fWasGuestDebugStateActive
9133#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9134 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9135#endif
9136 )
9137 {
9138 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9139 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9140 }
9141
9142 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9143 && !pVmxTransient->fWasHyperDebugStateActive)
9144 {
9145 Assert(!DBGFIsStepping(pVCpu));
9146 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9147
9148 /* Whether we disable intercepting MOV DRx instructions and resume
9149 the current one, or emulate it and keep intercepting them is
9150 configurable. Though it usually comes down to whether there are
9151 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9152#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9153 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9154#else
9155 bool const fResumeInstruction = true;
9156#endif
9157 if (fResumeInstruction)
9158 {
9159 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9160 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9161 AssertRC(rc);
9162 }
9163
9164#ifndef IN_NEM_DARWIN
9165 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9166 VMMRZCallRing3Disable(pVCpu);
9167 HM_DISABLE_PREEMPT(pVCpu);
9168
9169 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9170 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9171 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9172
9173 HM_RESTORE_PREEMPT();
9174 VMMRZCallRing3Enable(pVCpu);
9175#else
9176 CPUMR3NemActivateGuestDebugState(pVCpu);
9177 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9178 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9179#endif
9180
9181 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9182 if (fResumeInstruction)
9183 {
9184#ifdef VBOX_WITH_STATISTICS
9185 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9186 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9187 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9188 else
9189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9190#endif
9191 return VINF_SUCCESS;
9192 }
9193 }
9194 }
9195
9196 /*
9197 * Import state. We must have DR7 loaded here as it's always consulted,
9198 * both for reading and writing. The other debug registers are never
9199 * exported as such.
9200 */
9201 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9202 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9203 | CPUMCTX_EXTRN_GPRS_MASK
9204 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9205 AssertRCReturn(rc, rc);
9206
9207 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9208 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9209 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9210 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9211
9212 VBOXSTRICTRC rcStrict;
9213 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9214 {
9215 /*
9216 * Write DRx register.
9217 */
9218 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9219 AssertMsg( rcStrict == VINF_SUCCESS
9220 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9221
9222 if (rcStrict == VINF_SUCCESS)
9223 {
9224 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9225 * kept it for now to avoid breaking something non-obvious. */
9226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9227 | HM_CHANGED_GUEST_DR7);
9228 /* Update the DR6 register if guest debug state is active, otherwise we'll
9229 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9230 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9231 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9232 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9233 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9234 }
9235 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9236 {
9237 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9238 rcStrict = VINF_SUCCESS;
9239 }
9240
9241 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9242 }
9243 else
9244 {
9245 /*
9246 * Read DRx register into a general purpose register.
9247 */
9248 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9249 AssertMsg( rcStrict == VINF_SUCCESS
9250 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9251
9252 if (rcStrict == VINF_SUCCESS)
9253 {
9254 if (iGReg == X86_GREG_xSP)
9255 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9256 | HM_CHANGED_GUEST_RSP);
9257 else
9258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9259 }
9260 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9261 {
9262 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9263 rcStrict = VINF_SUCCESS;
9264 }
9265
9266 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9267 }
9268
9269 return rcStrict;
9270}
9271
9272
9273/**
9274 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9275 * Conditional VM-exit.
9276 */
9277HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9278{
9279 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9280
9281#ifndef IN_NEM_DARWIN
9282 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9283
9284 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9285 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9286 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9287 | HMVMX_READ_IDT_VECTORING_INFO
9288 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9289 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9290
9291 /*
9292 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9293 */
9294 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9295 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9296 {
9297 /*
9298 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9299 * instruction emulation to inject the original event. Otherwise, injecting the original event
9300 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9301 */
9302 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9303 { /* likely */ }
9304 else
9305 {
9306 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9307# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9308 /** @todo NSTVMX: Think about how this should be handled. */
9309 if (pVmxTransient->fIsNestedGuest)
9310 return VERR_VMX_IPE_3;
9311# endif
9312 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9313 }
9314 }
9315 else
9316 {
9317 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9318 return rcStrict;
9319 }
9320
9321 /*
9322 * Get sufficient state and update the exit history entry.
9323 */
9324 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9325 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9326 AssertRCReturn(rc, rc);
9327
9328 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9329 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9330 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9331 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9332 if (!pExitRec)
9333 {
9334 /*
9335 * If we succeed, resume guest execution.
9336 * If we fail to interpret the instruction because we couldn't get the guest physical address
9337 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9338 * in the host TLB), we resume execution, which causes a guest page fault and lets the guest handle this
9339 * weird case. See @bugref{6043}.
9340 */
9341 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9342/** @todo bird: We can probably just go straight to IOM here and assume that
9343 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9344 * well. However, we need to address that aliasing workarounds that
9345 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9346 *
9347 * Might also be interesting to see if we can get this done more or
9348 * less locklessly inside IOM. Need to consider the lookup table
9349 * updating and use a bit more carefully first (or do all updates via
9350 * rendezvous) */
9351 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9352 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9353 if ( rcStrict == VINF_SUCCESS
9354 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9355 || rcStrict == VERR_PAGE_NOT_PRESENT)
9356 {
9357 /* Successfully handled MMIO operation. */
9358 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9359 | HM_CHANGED_GUEST_APIC_TPR);
9360 rcStrict = VINF_SUCCESS;
9361 }
9362 }
9363 else
9364 {
9365 /*
9366 * Frequent exit or something needing probing. Call EMHistoryExec.
9367 */
9368 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9369 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9370
9371 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9373
9374 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9375 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9376 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9377 }
9378 return rcStrict;
9379#else
9380 AssertFailed();
9381 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9382#endif
9383}
9384
9385
9386/**
9387 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9388 * VM-exit.
9389 */
9390HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9391{
9392 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9393#ifndef IN_NEM_DARWIN
9394 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9395
9396 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9397 | HMVMX_READ_EXIT_INSTR_LEN
9398 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9399 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9400 | HMVMX_READ_IDT_VECTORING_INFO
9401 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9402 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9403
9404 /*
9405 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9406 */
9407 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9408 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9409 {
9410 /*
9411 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9412 * we shall resolve the nested #PF and re-inject the original event.
9413 */
9414 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9415 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9416 }
9417 else
9418 {
9419 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9420 return rcStrict;
9421 }
9422
9423 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9424 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9425 AssertRCReturn(rc, rc);
9426
9427 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9428 uint64_t const uExitQual = pVmxTransient->uExitQual;
9429 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9430
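    /* Translate the EPT-violation exit qualification bits into a page-fault style error code for PGM. */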
9431 RTGCUINT uErrorCode = 0;
9432 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9433 uErrorCode |= X86_TRAP_PF_ID;
9434 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9435 uErrorCode |= X86_TRAP_PF_RW;
9436 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9437 uErrorCode |= X86_TRAP_PF_P;
9438
9439 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9440 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9441
9442 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9443
9444 /*
9445 * Handle the page-fault trap for the nested shadow table.
9446 */
9447 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9448 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9449 TRPMResetTrap(pVCpu);
9450
9451 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9452 if ( rcStrict == VINF_SUCCESS
9453 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9454 || rcStrict == VERR_PAGE_NOT_PRESENT)
9455 {
9456 /* Successfully synced our nested page tables. */
9457 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9458 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9459 return VINF_SUCCESS;
9460 }
9461 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9462 return rcStrict;
9463
9464#else /* IN_NEM_DARWIN */
9465 PVM pVM = pVCpu->CTX_SUFF(pVM);
9466 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9467 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9468 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9469 vmxHCImportGuestRip(pVCpu);
9470 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9471
9472 /*
9473 * Ask PGM for information about the given GCPhys. We need to check if we're
9474 * out of sync first.
9475 */
9476 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9477 false,
9478 false };
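    /* The first member tells the page-checker callback whether this was a write access; the other
       flags (fDidSomething, fCanResume) are outputs filled in by the callback and checked below. */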
9479 PGMPHYSNEMPAGEINFO Info;
9480 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9481 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9482 if (RT_SUCCESS(rc))
9483 {
9484 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9485 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9486 {
9487 if (State.fCanResume)
9488 {
9489 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9490 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9491 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9492 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9493 State.fDidSomething ? "" : " no-change"));
9494 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9495 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9496 return VINF_SUCCESS;
9497 }
9498 }
9499
9500 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9501 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9502 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9503 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9504 State.fDidSomething ? "" : " no-change"));
9505 }
9506 else
9507 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9508 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9509 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9510
9511 /*
9512 * Emulate the memory access, either access handler or special memory.
9513 */
9514 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9515 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9516 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9517 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9518 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9519
9520 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9521 AssertRCReturn(rc, rc);
9522
9523 VBOXSTRICTRC rcStrict;
9524 if (!pExitRec)
9525 rcStrict = IEMExecOne(pVCpu);
9526 else
9527 {
9528 /* Frequent access or probing. */
9529 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9530 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9531 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9532 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9533 }
9534
9535 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9536
9537 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9538 return rcStrict;
9539#endif /* IN_NEM_DARWIN */
9540}
9541
9542#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9543
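/*
 * Note: The VMX-instruction VM-exit handlers below all follow the same pattern: read the exit
 * qualification / instruction info into the transient structure, import the guest state IEM
 * needs, decode the memory operand (if any), hand the instruction to IEM for emulation, and
 * finally fold VINF_IEM_RAISED_XCPT back into VINF_SUCCESS after flagging the raised-exception
 * context changes.
 */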
9544/**
9545 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9546 */
9547HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9548{
9549 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9550
9551 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9552 | HMVMX_READ_EXIT_INSTR_INFO
9553 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9554 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9555 | CPUMCTX_EXTRN_SREG_MASK
9556 | CPUMCTX_EXTRN_HWVIRT
9557 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9558 AssertRCReturn(rc, rc);
9559
9560 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9561
9562 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9563 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9564
9565 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9566 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9567 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9568 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9569 {
9570 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9571 rcStrict = VINF_SUCCESS;
9572 }
9573 return rcStrict;
9574}
9575
9576
9577/**
9578 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9579 */
9580HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9581{
9582 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9583
9584 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9585 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9586 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9587 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9588 AssertRCReturn(rc, rc);
9589
9590 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9591
9592 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9593 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9594 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9595 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9596 {
9597 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9598 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9599 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9600 }
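    /* Returning VINF_VMX_VMLAUNCH_VMRESUME indicates the VM entry succeeded and the guest is now
       in VMX non-root operation, i.e. the nested-guest should be executed next. */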
9601 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9602 return rcStrict;
9603}
9604
9605
9606/**
9607 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9608 */
9609HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9610{
9611 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9612
9613 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9614 | HMVMX_READ_EXIT_INSTR_INFO
9615 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9616 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9617 | CPUMCTX_EXTRN_SREG_MASK
9618 | CPUMCTX_EXTRN_HWVIRT
9619 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9620 AssertRCReturn(rc, rc);
9621
9622 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9623
9624 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9625 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9626
9627 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9628 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9629 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9630 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9631 {
9632 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9633 rcStrict = VINF_SUCCESS;
9634 }
9635 return rcStrict;
9636}
9637
9638
9639/**
9640 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9641 */
9642HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9643{
9644 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9645
9646 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9647 | HMVMX_READ_EXIT_INSTR_INFO
9648 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9649 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9650 | CPUMCTX_EXTRN_SREG_MASK
9651 | CPUMCTX_EXTRN_HWVIRT
9652 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9653 AssertRCReturn(rc, rc);
9654
9655 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9656
9657 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9658 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9659
9660 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9661 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9662 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9663 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9664 {
9665 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9666 rcStrict = VINF_SUCCESS;
9667 }
9668 return rcStrict;
9669}
9670
9671
9672/**
9673 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9674 */
9675HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9676{
9677 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9678
9679 /*
9680 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9681 * thus might not need to import the shadow VMCS state, but it's safer to do so in
9682 * case code elsewhere dares look at unsynced VMCS fields.
9683 */
9684 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9685 | HMVMX_READ_EXIT_INSTR_INFO
9686 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9687 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9688 | CPUMCTX_EXTRN_SREG_MASK
9689 | CPUMCTX_EXTRN_HWVIRT
9690 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9691 AssertRCReturn(rc, rc);
9692
9693 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9694
9695 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9696 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9697 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9698
9699 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9700 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9701 {
9702 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9703
9704# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9705 /* Try for exit optimization. This is on the following instruction
9706 because it would be a waste of time to have to reinterpret the
9707 already decoded vmread instruction. */
9708 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9709 if (pExitRec)
9710 {
9711 /* Frequent access or probing. */
9712 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9713 AssertRCReturn(rc, rc);
9714
9715 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9716 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9717 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9718 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9719 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9720 }
9721# endif
9722 }
9723 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9724 {
9725 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9726 rcStrict = VINF_SUCCESS;
9727 }
9728 return rcStrict;
9729}
9730
9731
9732/**
9733 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9734 */
9735HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9736{
9737 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9738
9739 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9740 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9741 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9742 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9743 AssertRCReturn(rc, rc);
9744
9745 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9746
9747 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9748 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9749 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9750 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9751 {
9752 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9753 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9754 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9755 }
9756 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9757 return rcStrict;
9758}
9759
9760
9761/**
9762 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9763 */
9764HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9765{
9766 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9767
9768 /*
9769 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9770 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9771 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9772 */
9773 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9774 | HMVMX_READ_EXIT_INSTR_INFO
9775 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9776 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9777 | CPUMCTX_EXTRN_SREG_MASK
9778 | CPUMCTX_EXTRN_HWVIRT
9779 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9780 AssertRCReturn(rc, rc);
9781
9782 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9783
9784 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9785 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9786 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9787
9788 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9789 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9790 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9791 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9792 {
9793 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9794 rcStrict = VINF_SUCCESS;
9795 }
9796 return rcStrict;
9797}
9798
9799
9800/**
9801 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9802 */
9803HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9804{
9805 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9806
9807 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9808 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9809 | CPUMCTX_EXTRN_HWVIRT
9810 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9811 AssertRCReturn(rc, rc);
9812
9813 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9814
9815 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9816 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9818 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9819 {
9820 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9821 rcStrict = VINF_SUCCESS;
9822 }
9823 return rcStrict;
9824}
9825
9826
9827/**
9828 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9829 */
9830HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9831{
9832 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9833
9834 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9835 | HMVMX_READ_EXIT_INSTR_INFO
9836 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9837 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9838 | CPUMCTX_EXTRN_SREG_MASK
9839 | CPUMCTX_EXTRN_HWVIRT
9840 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9841 AssertRCReturn(rc, rc);
9842
9843 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9844
9845 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9846 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9847
9848 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9849 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9850 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9851 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9852 {
9853 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9854 rcStrict = VINF_SUCCESS;
9855 }
9856 return rcStrict;
9857}
9858
9859
9860/**
9861 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9862 */
9863HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9864{
9865 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9866
9867 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9868 | HMVMX_READ_EXIT_INSTR_INFO
9869 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9870 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9871 | CPUMCTX_EXTRN_SREG_MASK
9872 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9873 AssertRCReturn(rc, rc);
9874
9875 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9876
9877 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9878 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9879
9880 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9881 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9882 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9883 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9884 {
9885 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9886 rcStrict = VINF_SUCCESS;
9887 }
9888 return rcStrict;
9889}
9890
9891
9892# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9893/**
9894 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9895 */
9896HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9897{
9898 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9899
9900 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9901 | HMVMX_READ_EXIT_INSTR_INFO
9902 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9903 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9904 | CPUMCTX_EXTRN_SREG_MASK
9905 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9906 AssertRCReturn(rc, rc);
9907
9908 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9909
9910 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9911 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9912
9913 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9914 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9915 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9916 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9917 {
9918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9919 rcStrict = VINF_SUCCESS;
9920 }
9921 return rcStrict;
9922}
9923# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9924#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9925/** @} */
9926
9927
9928#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9929/** @name Nested-guest VM-exit handlers.
9930 * @{
9931 */
9932/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9933/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9934/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9935
9936/**
9937 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9938 * Conditional VM-exit.
9939 */
9940HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9941{
9942 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9943
9944 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9945
9946 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9947 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9948 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9949
9950 switch (uExitIntType)
9951 {
9952# ifndef IN_NEM_DARWIN
9953 /*
9954 * Physical NMIs:
9955 	         * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9956 */
9957 case VMX_EXIT_INT_INFO_TYPE_NMI:
9958 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9959# endif
9960
9961 /*
9962 * Hardware exceptions,
9963 * Software exceptions,
9964 * Privileged software exceptions:
9965 * Figure out if the exception must be delivered to the guest or the nested-guest.
9966 */
9967 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9968 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9969 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9970 {
9971 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9972 | HMVMX_READ_EXIT_INSTR_LEN
9973 | HMVMX_READ_IDT_VECTORING_INFO
9974 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9975
9976 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9977 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9978 {
9979 /* Exit qualification is required for debug and page-fault exceptions. */
9980 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9981
9982 /*
9983 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9984 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9985 * length. However, if delivery of a software interrupt, software exception or privileged
9986 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9987 */
9988 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9989 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9990 pVmxTransient->uExitIntErrorCode,
9991 pVmxTransient->uIdtVectoringInfo,
9992 pVmxTransient->uIdtVectoringErrorCode);
9993#ifdef DEBUG_ramshankar
9994 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9995 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9996 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9997 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9998 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9999 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10000#endif
10001 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10002 }
10003
10004 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10005 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10006 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10007 }
10008
10009 /*
10010 * Software interrupts:
10011 * VM-exits cannot be caused by software interrupts.
10012 *
10013 * External interrupts:
10014 * This should only happen when "acknowledge external interrupts on VM-exit"
10015 * control is set. However, we never set this when executing a guest or
10016 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10017 * the guest.
10018 */
10019 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10020 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10021 default:
10022 {
10023 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10024 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10025 }
10026 }
10027}
10028
10029
10030/**
10031 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10032 * Unconditional VM-exit.
10033 */
10034HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10035{
10036 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10037 return IEMExecVmxVmexitTripleFault(pVCpu);
10038}
10039
10040
10041/**
10042 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10043 */
10044HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10045{
10046 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10047
10048 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10049 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10050 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10051}
10052
10053
10054/**
10055 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10056 */
10057HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10058{
10059 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10060
10061 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10062 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10063 	    return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10064}
10065
10066
10067/**
10068 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10069 * Unconditional VM-exit.
10070 */
10071HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10072{
10073 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10074
10075 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10076 | HMVMX_READ_EXIT_INSTR_LEN
10077 | HMVMX_READ_IDT_VECTORING_INFO
10078 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10079
10080 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10081 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10082 pVmxTransient->uIdtVectoringErrorCode);
10083 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10084}
10085
10086
10087/**
10088 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10089 */
10090HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10091{
10092 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10093
10094 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10095 {
10096 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10097 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10098 }
10099 return vmxHCExitHlt(pVCpu, pVmxTransient);
10100}
10101
10102
10103/**
10104 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10105 */
10106HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10107{
10108 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10109
10110 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10111 {
10112 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10113 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10114 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10115 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10116 }
10117 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10118}
10119
10120
10121/**
10122 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10123 */
10124HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10125{
10126 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10127
10128 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10129 {
10130 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10131 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10132 }
10133 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10134}
10135
10136
10137/**
10138 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10139 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10140 */
10141HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10142{
10143 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10144
10145 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10146 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10147
10148 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10149
10150 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10151 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10152 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10153
10154 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
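    /* Outside 64-bit mode only the lower 32 bits of the register operand supply the VMCS field encoding. */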
10155 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10156 u64VmcsField &= UINT64_C(0xffffffff);
10157
10158 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10159 {
10160 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10161 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10162 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10163 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10164 }
10165
10166 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10167 return vmxHCExitVmread(pVCpu, pVmxTransient);
10168 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10169}
10170
10171
10172/**
10173 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10174 */
10175HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10176{
10177 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10178
10179 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10180 {
10181 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10182 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10183 }
10184
10185 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10186}
10187
10188
10189/**
10190 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10191 * Conditional VM-exit.
10192 */
10193HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10194{
10195 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10196
10197 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10198 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10199
10200 VBOXSTRICTRC rcStrict;
10201 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10202 switch (uAccessType)
10203 {
10204 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10205 {
10206 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10207 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10208 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10209 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10210
10211 bool fIntercept;
10212 switch (iCrReg)
10213 {
10214 case 0:
10215 case 4:
10216 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10217 break;
10218
10219 case 3:
10220 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10221 break;
10222
10223 case 8:
10224 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10225 break;
10226
10227 default:
10228 fIntercept = false;
10229 break;
10230 }
10231 if (fIntercept)
10232 {
10233 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10234 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10235 }
10236 else
10237 {
10238 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10239 AssertRCReturn(rc, rc);
10240 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10241 }
10242 break;
10243 }
10244
10245 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10246 {
10247 /*
10248 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10249 * CR2 reads do not cause a VM-exit.
10250 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10251 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10252 */
10253 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10254 if ( iCrReg == 3
10255 || iCrReg == 8)
10256 {
10257 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10258 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10259 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10260 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10261 {
10262 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10263 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10264 }
10265 else
10266 {
10267 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10268 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10269 }
10270 }
10271 else
10272 {
10273 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10274 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10275 }
10276 break;
10277 }
10278
10279 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10280 {
10281 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10282 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10283 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
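            /* CLTS causes a VM-exit only when CR0.TS is owned by the nested hypervisor (set in the
               CR0 guest/host mask) and the CR0 read shadow also has TS set. */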
10284 if ( (uGstHostMask & X86_CR0_TS)
10285 && (uReadShadow & X86_CR0_TS))
10286 {
10287 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10288 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10289 }
10290 else
10291 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10292 break;
10293 }
10294
10295 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10296 {
10297 RTGCPTR GCPtrEffDst;
10298 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10299 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10300 if (fMemOperand)
10301 {
10302 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10303 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10304 }
10305 else
10306 GCPtrEffDst = NIL_RTGCPTR;
10307
10308 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10309 {
10310 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10311 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10312 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10313 }
10314 else
10315 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10316 break;
10317 }
10318
10319 default:
10320 {
10321 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10322 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10323 }
10324 }
10325
10326 if (rcStrict == VINF_IEM_RAISED_XCPT)
10327 {
10328 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10329 rcStrict = VINF_SUCCESS;
10330 }
10331 return rcStrict;
10332}
10333
10334
10335/**
10336 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10337 * Conditional VM-exit.
10338 */
10339HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10340{
10341 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10342
10343 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10344 {
10345 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10346 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10347 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10348 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10349 }
10350 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10351}
10352
10353
10354/**
10355 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10356 * Conditional VM-exit.
10357 */
10358HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10359{
10360 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10361
10362 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10363
10364 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10365 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
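    /* The exit qualification size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes; 2 is not a valid encoding. */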
10366 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10367
10368 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10369 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10370 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10371 {
10372 /*
10373 * IN/OUT instruction:
10374 * - Provides VM-exit instruction length.
10375 *
10376 * INS/OUTS instruction:
10377 * - Provides VM-exit instruction length.
10378 * - Provides Guest-linear address.
10379 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10380 */
10381 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10382 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10383
10384 	        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10385 pVmxTransient->ExitInstrInfo.u = 0;
10386 pVmxTransient->uGuestLinearAddr = 0;
10387
10388 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10389 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10390 if (fIOString)
10391 {
10392 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10393 if (fVmxInsOutsInfo)
10394 {
10395 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10396 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10397 }
10398 }
10399
10400 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10401 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10402 }
10403 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10404}
10405
10406
10407/**
10408 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10409 */
10410HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10411{
10412 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10413
10414 uint32_t fMsrpm;
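    /* Without MSR bitmaps every RDMSR executed by the nested-guest causes a VM-exit;
       otherwise consult the nested hypervisor's MSR bitmap. */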
10415 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10416 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10417 else
10418 fMsrpm = VMXMSRPM_EXIT_RD;
10419
10420 if (fMsrpm & VMXMSRPM_EXIT_RD)
10421 {
10422 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10423 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10424 }
10425 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10426}
10427
10428
10429/**
10430 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10431 */
10432HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10433{
10434 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10435
10436 uint32_t fMsrpm;
10437 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10438 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10439 else
10440 fMsrpm = VMXMSRPM_EXIT_WR;
10441
10442 if (fMsrpm & VMXMSRPM_EXIT_WR)
10443 {
10444 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10445 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10446 }
10447 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10448}
10449
10450
10451/**
10452 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10453 */
10454HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10455{
10456 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10457
10458 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10459 {
10460 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10461 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10462 }
10463 return vmxHCExitMwait(pVCpu, pVmxTransient);
10464}
10465
10466
10467/**
10468 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10469 * VM-exit.
10470 */
10471HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10472{
10473 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10474
10475 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10476 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10477 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10478 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10479}
10480
10481
10482/**
10483 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10484 */
10485HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10486{
10487 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10488
10489 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10490 {
10491 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10492 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10493 }
10494 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10495}
10496
10497
10498/**
10499 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10500 */
10501HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10502{
10503 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10504
10505 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10506 * PAUSE when executing a nested-guest? If it does not, we would not need
10507 * to check for the intercepts here. Just call VM-exit... */
10508
10509 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10510 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10511 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10512 {
10513 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10514 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10515 }
10516 return vmxHCExitPause(pVCpu, pVmxTransient);
10517}
10518
10519
10520/**
10521 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10522 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10523 */
10524HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10525{
10526 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10527
10528 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10529 {
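        /* TPR-below-threshold is a trap-like VM-exit, so the guest pending debug exceptions are
           passed along as part of the nested-guest VM-exit. */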
10530 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10531 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10532 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10533 }
10534 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10535}
10536
10537
10538/**
10539 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10540 * VM-exit.
10541 */
10542HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10543{
10544 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10545
10546 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10547 | HMVMX_READ_EXIT_INSTR_LEN
10548 | HMVMX_READ_IDT_VECTORING_INFO
10549 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10550
10551 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10552
10553 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10554 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10555
10556 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10557 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10558 pVmxTransient->uIdtVectoringErrorCode);
10559 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10560}
10561
10562
10563/**
10564 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10565 * Conditional VM-exit.
10566 */
10567HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10568{
10569 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10570
10571 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10572 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10573 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10574}
10575
10576
10577/**
10578 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10579 * Conditional VM-exit.
10580 */
10581HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10582{
10583 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10584
10585 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10586 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10587 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10588}
10589
10590
10591/**
10592 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10593 */
10594HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10595{
10596 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10597
10598 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10599 {
10600 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10601 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10602 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10603 }
10604 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10605}
10606
10607
10608/**
10609 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10610 */
10611HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10612{
10613 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10614
10615 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10616 {
10617 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10618 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10619 }
10620 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10621}
10622
10623
10624/**
10625 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10626 */
10627HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10628{
10629 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10630
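    /* INVPCID interception in the nested-guest is governed by the INVLPG-exiting control
       (the enable-INVPCID secondary control is asserted below). */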
10631 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10632 {
10633 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10634 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10635 | HMVMX_READ_EXIT_INSTR_INFO
10636 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10637 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10638 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10639 }
10640 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10641}
10642
10643
10644/**
10645 * Nested-guest VM-exit handler for invalid-guest state
10646 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10647 */
10648HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10649{
10650 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10651
10652 /*
10653 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10654 	     * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10655 	     * Handle it as if the outer guest were in an invalid guest state.
10656 *
10657 * When the fast path is implemented, this should be changed to cause the corresponding
10658 * nested-guest VM-exit.
10659 */
10660 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10661}
10662
10663
10664/**
10665 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10666 * and only provide the instruction length.
10667 *
10668 * Unconditional VM-exit.
10669 */
10670HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10671{
10672 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10673
10674#ifdef VBOX_STRICT
10675 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10676 switch (pVmxTransient->uExitReason)
10677 {
10678 case VMX_EXIT_ENCLS:
10679 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10680 break;
10681
10682 case VMX_EXIT_VMFUNC:
10683 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10684 break;
10685 }
10686#endif
10687
10688 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10689 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10690}
10691
10692
10693/**
10694 * Nested-guest VM-exit handler for instructions that provide instruction length as
10695 * well as more information.
10696 *
10697 * Unconditional VM-exit.
10698 */
10699HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10700{
10701 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10702
10703# ifdef VBOX_STRICT
10704 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10705 switch (pVmxTransient->uExitReason)
10706 {
10707 case VMX_EXIT_GDTR_IDTR_ACCESS:
10708 case VMX_EXIT_LDTR_TR_ACCESS:
10709 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10710 break;
10711
10712 case VMX_EXIT_RDRAND:
10713 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10714 break;
10715
10716 case VMX_EXIT_RDSEED:
10717 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10718 break;
10719
10720 case VMX_EXIT_XSAVES:
10721 case VMX_EXIT_XRSTORS:
10722 /** @todo NSTVMX: Verify XSS-bitmap. */
10723 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10724 break;
10725
10726 case VMX_EXIT_UMWAIT:
10727 case VMX_EXIT_TPAUSE:
10728 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10729 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10730 break;
10731
10732 case VMX_EXIT_LOADIWKEY:
10733 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10734 break;
10735 }
10736# endif
10737
10738 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10739 | HMVMX_READ_EXIT_INSTR_LEN
10740 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10741 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10742 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10743}
10744
10745# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10746
10747/**
10748 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10749 * Conditional VM-exit.
10750 */
10751HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10752{
10753 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10754 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10755
10756 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10757 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10758 {
10759 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10760 | HMVMX_READ_EXIT_INSTR_LEN
10761 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10762 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10763 | HMVMX_READ_IDT_VECTORING_INFO
10764 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10765 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10766 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10767 AssertRCReturn(rc, rc);
10768
10769 /*
10770 	         * If the VM-exit is ours to handle, we are responsible for re-injecting any event whose
10771 	         * delivery might have triggered this VM-exit. If we forward the VM-exit to the inner VMM,
10772 	         * re-injecting the event becomes its responsibility and we clear the recovered event.
10773 */
10774 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10775 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10776 { /*likely*/ }
10777 else
10778 {
10779 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10780 return rcStrict;
10781 }
10782 	        uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* Paranoia; nothing below this point should inject events. */
10783
10784 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10785 uint64_t const uExitQual = pVmxTransient->uExitQual;
10786
10787 RTGCPTR GCPtrNestedFault;
10788 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10789 if (fIsLinearAddrValid)
10790 {
10791 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10792 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10793 }
10794 else
10795 GCPtrNestedFault = 0;
10796
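        /* Translate the EPT-violation exit qualification (access type and EPT-entry permissions)
           into x86 page-fault error-code bits for the nested-paging handler. */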
10797 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10798 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10799 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10800 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10801 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10802
10803 PGMPTWALK Walk;
10804 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10805 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10806 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10807 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10808 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10809 if (RT_SUCCESS(rcStrict))
10810 {
10811 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
10812 {
10813 Assert(!fClearEventOnForward);
10814 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
10815 rcStrict = VINF_EM_RESCHEDULE_REM;
10816 }
10817 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10818 return rcStrict;
10819 }
10820
10821 if (fClearEventOnForward)
10822 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10823
10824 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10825 pVmxTransient->uIdtVectoringErrorCode);
10826 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10827 {
10828 VMXVEXITINFO const ExitInfo
10829 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10830 pVmxTransient->uExitQual,
10831 pVmxTransient->cbExitInstr,
10832 pVmxTransient->uGuestLinearAddr,
10833 pVmxTransient->uGuestPhysicalAddr);
10834 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10835 }
10836
10837 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10838 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10839 }
10840
10841 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10842}
10843
10844
10845/**
10846 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10847 * Conditional VM-exit.
10848 */
10849HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10850{
10851 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10852 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10853
10854 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10855 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10856 {
10857 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10858 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10859 AssertRCReturn(rc, rc);
10860
10861 PGMPTWALK Walk;
10862 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10863 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10864 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10865 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10866 0 /* GCPtrNestedFault */, &Walk);
10867 if (RT_SUCCESS(rcStrict))
10868 {
10869 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10870 return rcStrict;
10871 }
10872
10873 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10874 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10875 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10876
10877 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10878 pVmxTransient->uIdtVectoringErrorCode);
10879 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10880 }
10881
10882 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10883}
10884
10885# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10886
10887/** @} */
10888#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10889
10890
10891/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10892 * probes.
10893 *
10894 	 * The following few functions and associated structure contain the bloat
10895 * necessary for providing detailed debug events and dtrace probes as well as
10896 * reliable host side single stepping. This works on the principle of
10897 * "subclassing" the normal execution loop and workers. We replace the loop
10898 * method completely and override selected helpers to add necessary adjustments
10899 * to their core operation.
10900 *
10901 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10902 * any performance for debug and analysis features.
10903 *
10904 * @{
10905 */
10906
10907/**
10908 	 * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
10909 * the debug run loop.
10910 */
10911typedef struct VMXRUNDBGSTATE
10912{
10913 /** The RIP we started executing at. This is for detecting that we stepped. */
10914 uint64_t uRipStart;
10915 /** The CS we started executing with. */
10916 uint16_t uCsStart;
10917
10918 /** Whether we've actually modified the 1st execution control field. */
10919 bool fModifiedProcCtls : 1;
10920 /** Whether we've actually modified the 2nd execution control field. */
10921 bool fModifiedProcCtls2 : 1;
10922 /** Whether we've actually modified the exception bitmap. */
10923 bool fModifiedXcptBitmap : 1;
10924
10925 	    /** We desire the modified CR0 mask to be cleared. */
10926 	    bool fClearCr0Mask : 1;
10927 	    /** We desire the modified CR4 mask to be cleared. */
10928 bool fClearCr4Mask : 1;
10929 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10930 uint32_t fCpe1Extra;
10931 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10932 uint32_t fCpe1Unwanted;
10933 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10934 uint32_t fCpe2Extra;
10935 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10936 uint32_t bmXcptExtra;
10937 /** The sequence number of the Dtrace provider settings the state was
10938 * configured against. */
10939 uint32_t uDtraceSettingsSeqNo;
10940 /** VM-exits to check (one bit per VM-exit). */
10941 uint32_t bmExitsToCheck[3];
10942
10943 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10944 uint32_t fProcCtlsInitial;
10945 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10946 uint32_t fProcCtls2Initial;
10947 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10948 uint32_t bmXcptInitial;
10949} VMXRUNDBGSTATE;
10950AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10951typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10952
10953
10954/**
10955 * Initializes the VMXRUNDBGSTATE structure.
10956 *
10957 * @param pVCpu The cross context virtual CPU structure of the
10958 * calling EMT.
10959 * @param pVmxTransient The VMX-transient structure.
10960 * @param pDbgState The debug state to initialize.
10961 */
10962static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10963{
10964 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10965 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10966
10967 pDbgState->fModifiedProcCtls = false;
10968 pDbgState->fModifiedProcCtls2 = false;
10969 pDbgState->fModifiedXcptBitmap = false;
10970 pDbgState->fClearCr0Mask = false;
10971 pDbgState->fClearCr4Mask = false;
10972 pDbgState->fCpe1Extra = 0;
10973 pDbgState->fCpe1Unwanted = 0;
10974 pDbgState->fCpe2Extra = 0;
10975 pDbgState->bmXcptExtra = 0;
10976 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10977 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10978 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10979}
10980
10981
10982/**
10983 	 * Updates the VMCS fields with changes requested by @a pDbgState.
10984 *
10985 	 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10986 * immediately before executing guest code, i.e. when interrupts are disabled.
10987 * We don't check status codes here as we cannot easily assert or return in the
10988 * latter case.
10989 *
10990 * @param pVCpu The cross context virtual CPU structure.
10991 * @param pVmxTransient The VMX-transient structure.
10992 * @param pDbgState The debug state.
10993 */
10994static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10995{
10996 /*
10997 * Ensure desired flags in VMCS control fields are set.
10998 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10999 *
11000 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11001 * there should be no stale data in pCtx at this point.
11002 */
11003 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11004 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11005 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11006 {
11007 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11008 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11009 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11010 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11011 pDbgState->fModifiedProcCtls = true;
11012 }
11013
11014 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11015 {
11016 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11017 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11018 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11019 pDbgState->fModifiedProcCtls2 = true;
11020 }
11021
11022 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11023 {
11024 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11025 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11026 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11027 pDbgState->fModifiedXcptBitmap = true;
11028 }
11029
11030 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11031 {
11032 pVmcsInfo->u64Cr0Mask = 0;
11033 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11034 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11035 }
11036
11037 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11038 {
11039 pVmcsInfo->u64Cr4Mask = 0;
11040 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11041 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11042 }
11043
11044 NOREF(pVCpu);
11045}
11046
11047
11048/**
11049 	 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11050 * re-entry next time around.
11051 *
11052 * @returns Strict VBox status code (i.e. informational status codes too).
11053 * @param pVCpu The cross context virtual CPU structure.
11054 * @param pVmxTransient The VMX-transient structure.
11055 * @param pDbgState The debug state.
11056 * @param rcStrict The return code from executing the guest using single
11057 * stepping.
11058 */
11059static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11060 VBOXSTRICTRC rcStrict)
11061{
11062 /*
11063 * Restore VM-exit control settings as we may not reenter this function the
11064 * next time around.
11065 */
11066 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11067
11068 	    /* We reload the initial value and trigger what recalculations we can the
11069 	       next time around. From the looks of things, that's all that's required at the moment. */
11070 if (pDbgState->fModifiedProcCtls)
11071 {
11072 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11073 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11074 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11075 AssertRC(rc2);
11076 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11077 }
11078
11079 /* We're currently the only ones messing with this one, so just restore the
11080 cached value and reload the field. */
11081 if ( pDbgState->fModifiedProcCtls2
11082 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11083 {
11084 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11085 AssertRC(rc2);
11086 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11087 }
11088
11089 /* If we've modified the exception bitmap, we restore it and trigger
11090 reloading and partial recalculation the next time around. */
11091 if (pDbgState->fModifiedXcptBitmap)
11092 {
11093 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11094 AssertRC(rc2);
11095 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11096 }
11097
11098 return rcStrict;
11099}
11100
11101
11102/**
11103 * Configures VM-exit controls for current DBGF and DTrace settings.
11104 *
11105 * This updates @a pDbgState and the VMCS execution control fields to reflect
11106 * the necessary VM-exits demanded by DBGF and DTrace.
11107 *
11108 * @param pVCpu The cross context virtual CPU structure.
11109 * @param pVmxTransient The VMX-transient structure. May update
11110 * fUpdatedTscOffsettingAndPreemptTimer.
11111 * @param pDbgState The debug state.
11112 */
11113static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11114{
11115#ifndef IN_NEM_DARWIN
11116 /*
11117 * Take down the dtrace serial number so we can spot changes.
11118 */
11119 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11120 ASMCompilerBarrier();
11121#endif
11122
11123 /*
11124 * We'll rebuild most of the middle block of data members (holding the
11125 * current settings) as we go along here, so start by clearing it all.
11126 */
11127 pDbgState->bmXcptExtra = 0;
11128 pDbgState->fCpe1Extra = 0;
11129 pDbgState->fCpe1Unwanted = 0;
11130 pDbgState->fCpe2Extra = 0;
11131 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11132 pDbgState->bmExitsToCheck[i] = 0;
11133
11134 /*
11135 * Software interrupts (INT XXh) - no idea how to trigger these...
11136 */
11137 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11138 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11139 || VBOXVMM_INT_SOFTWARE_ENABLED())
11140 {
11141 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11142 }
11143
11144 /*
11145 * INT3 breakpoints - triggered by #BP exceptions.
11146 */
11147 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11148 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11149
11150 /*
11151 * Exception bitmap and XCPT events+probes.
11152 */
11153 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11154 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11155 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11156
11157 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11158 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11159 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11160 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11161 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11162 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11163 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11164 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11165 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11166 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11167 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11168 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11169 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11170 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11171 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11172 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11173 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11174 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11175
11176 if (pDbgState->bmXcptExtra)
11177 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11178
11179 /*
11180 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11181 *
11182 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11183 * So, when adding/changing/removing please don't forget to update it.
11184 *
11185 * Some of the macros are picking up local variables to save horizontal space,
11186 * (being able to see it in a table is the lesser evil here).
11187 */
11188#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11189 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11190 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11191#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11192 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11193 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11194 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11195 } else do { } while (0)
11196#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11197 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11198 { \
11199 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11200 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11201 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11202 } else do { } while (0)
11203#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11204 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11205 { \
11206 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11207 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11208 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11209 } else do { } while (0)
11210#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11211 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11212 { \
11213 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11214 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11215 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11216 } else do { } while (0)
11217
11218 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11219 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11220 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11221 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11222 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11223
11224 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11225 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11226 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11227 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11228 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11229 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11230 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11231 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11232 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11233 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11234 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11235 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11236 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11237 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11238 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11239 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11240 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11241 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11242 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11243 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11244 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11245 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11246 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11247 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11248 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11249 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11250 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11251 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11252 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11253 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11254 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11255 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11256 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11257 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11258 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11259 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11260
11261 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11262 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11263 {
11264 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11265 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11266 AssertRC(rc);
11267
11268#if 0 /** @todo fix me */
11269 pDbgState->fClearCr0Mask = true;
11270 pDbgState->fClearCr4Mask = true;
11271#endif
11272 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11273 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11274 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11275 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11276 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11277 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11278 require clearing here and in the loop if we start using it. */
11279 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11280 }
11281 else
11282 {
11283 if (pDbgState->fClearCr0Mask)
11284 {
11285 pDbgState->fClearCr0Mask = false;
11286 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11287 }
11288 if (pDbgState->fClearCr4Mask)
11289 {
11290 pDbgState->fClearCr4Mask = false;
11291 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11292 }
11293 }
11294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11296
11297 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11298 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11299 {
11300 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11301 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11302 }
11303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11304 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11305
11306 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11308 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11310 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11312 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11314#if 0 /** @todo too slow, fix handler. */
11315 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11316#endif
11317 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11318
11319 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11320 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11321 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11322 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11323 {
11324 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11325 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11326 }
11327 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11329 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11330 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11331
11332 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11333 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11334 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11335 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11336 {
11337 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11338 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11339 }
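    /* Note! The descriptor-table exiting control (VMX_PROC_CTLS2_DESC_TABLE_EXIT) covers both
             the GDTR/IDTR and the LDTR/TR instruction groups; there is no finer-grained
             intercept, which is why both blocks above request the same bit. */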
11340 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11341 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11342 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11343 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11344
11345 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11346 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11347 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11348 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11349 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11350 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11351 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11352 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11353 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11354 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11355 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11356 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11357 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11358 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11359 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11360 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11361 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11362 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11363 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11364     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11365 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11366 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11367
11368#undef IS_EITHER_ENABLED
11369#undef SET_ONLY_XBM_IF_EITHER_EN
11370#undef SET_CPE1_XBM_IF_EITHER_EN
11371#undef SET_CPEU_XBM_IF_EITHER_EN
11372#undef SET_CPE2_XBM_IF_EITHER_EN
11373
11374 /*
11375     * Sanitize the extra and unwanted execution controls against the host's VMX capabilities.
11376 */
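    /* Note! In the captured capability values, allowed1 holds the control bits we may set
             while allowed0 holds the bits the CPU requires to be set, so the masking below
             drops unsupported extras and avoids trying to clear mandatory controls. */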
11377 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11378 if (pDbgState->fCpe2Extra)
11379 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11380 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11381 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
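    /* If the RDTSC-exiting preference changed, clear fUpdatedTscOffsettingAndPreemptTimer so
       that the TSC offsetting / VMX-preemption timer setup will be recomputed. */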
11382#ifndef IN_NEM_DARWIN
11383 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11384 {
11385 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11386 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11387 }
11388#else
11389 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11390 {
11391 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11392 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11393 }
11394#endif
11395
11396 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11397 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11398 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11399 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11400}
11401
11402
11403/**
11404 * Fires off DBGF events and dtrace probes for a VM-exit when it's
11405 * appropriate.
11406 *
11407 * The caller has already checked the VM-exit against the
11408 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has checked for NMIs, so
11409 * neither needs to be done here.
11410 *
11411 * @returns Strict VBox status code (i.e. informational status codes too).
11412 * @param pVCpu The cross context virtual CPU structure.
11413 * @param pVmxTransient The VMX-transient structure.
11414 * @param uExitReason The VM-exit reason.
11415 *
11416 * @remarks The name of this function is displayed by dtrace, so keep it short
11417 *          and to the point. No longer than 33 chars, please.
11418 */
11419static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11420{
11421 /*
11422 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11423 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11424 *
11425     * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11426     *       does. Add, change or remove things in both places, keeping the same ordering, please.
11427 *
11428 * Added/removed events must also be reflected in the next section
11429 * where we dispatch dtrace events.
11430 */
11431 bool fDtrace1 = false;
11432 bool fDtrace2 = false;
11433 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11434 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11435 uint32_t uEventArg = 0;
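    /* enmEvent1/fDtrace1 describe the instruction-level (or exception) event, while
       enmEvent2/fDtrace2 describe the corresponding VM-exit-level event; SET_BOTH fills
       in both, SET_EXIT only the latter. */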
11436#define SET_EXIT(a_EventSubName) \
11437 do { \
11438 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11439 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11440 } while (0)
11441#define SET_BOTH(a_EventSubName) \
11442 do { \
11443 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11444 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11445 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11446 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11447 } while (0)
11448 switch (uExitReason)
11449 {
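        /* The monitor-trap-flag exit is what the single-stepping logic relies on, so it is
           handed straight to its dedicated handler instead of the generic event translation. */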
11450 case VMX_EXIT_MTF:
11451 return vmxHCExitMtf(pVCpu, pVmxTransient);
11452
11453 case VMX_EXIT_XCPT_OR_NMI:
11454 {
11455 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11456 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11457 {
11458 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11459 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11460 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11461 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11462 {
11463 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11464 {
11465 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11466 uEventArg = pVmxTransient->uExitIntErrorCode;
11467 }
11468 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11469 switch (enmEvent1)
11470 {
11471 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11472 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11473 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11474 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11475 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11476 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11477 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11478 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11479 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11480 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11481 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11482 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11483 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11484 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11485 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11486 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11487 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11488 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11489 default: break;
11490 }
11491 }
11492 else
11493 AssertFailed();
11494 break;
11495
11496 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11497 uEventArg = idxVector;
11498 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11499 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11500 break;
11501 }
11502 break;
11503 }
11504
11505 case VMX_EXIT_TRIPLE_FAULT:
11506 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11507 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11508 break;
11509 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11510 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11511 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11512 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11513 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11514
11515 /* Instruction specific VM-exits: */
11516 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11517 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11518 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11519 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11520 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11521 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11522 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11523 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11524 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11525 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11526 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11527 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11528 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11529 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11530 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11531 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11532 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11533 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11534 case VMX_EXIT_MOV_CRX:
11535 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11536 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11537 SET_BOTH(CRX_READ);
11538 else
11539 SET_BOTH(CRX_WRITE);
11540 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11541 break;
11542 case VMX_EXIT_MOV_DRX:
11543 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11544 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11545 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11546 SET_BOTH(DRX_READ);
11547 else
11548 SET_BOTH(DRX_WRITE);
11549 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11550 break;
11551 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11552 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11553 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11554 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11555 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11556 case VMX_EXIT_GDTR_IDTR_ACCESS:
11557 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11558 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11559 {
11560 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11561 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11562 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11563 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11564 }
11565 break;
11566
11567 case VMX_EXIT_LDTR_TR_ACCESS:
11568 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11569 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11570 {
11571 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11572 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11573 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11574 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11575 }
11576 break;
11577
11578 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11579 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11580 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11581 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11582 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11583 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11584 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11585 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11586 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11587 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11588 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11589
11590 /* Events that aren't relevant at this point. */
11591 case VMX_EXIT_EXT_INT:
11592 case VMX_EXIT_INT_WINDOW:
11593 case VMX_EXIT_NMI_WINDOW:
11594 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11595 case VMX_EXIT_PREEMPT_TIMER:
11596 case VMX_EXIT_IO_INSTR:
11597 break;
11598
11599 /* Errors and unexpected events. */
11600 case VMX_EXIT_INIT_SIGNAL:
11601 case VMX_EXIT_SIPI:
11602 case VMX_EXIT_IO_SMI:
11603 case VMX_EXIT_SMI:
11604 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11605 case VMX_EXIT_ERR_MSR_LOAD:
11606 case VMX_EXIT_ERR_MACHINE_CHECK:
11607 case VMX_EXIT_PML_FULL:
11608 case VMX_EXIT_VIRTUALIZED_EOI:
11609 break;
11610
11611 default:
11612 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11613 break;
11614 }
11615#undef SET_BOTH
11616#undef SET_EXIT
11617
11618 /*
11619     * Dtrace tracepoints go first. We fire them all here so we don't have to
11620     * duplicate the guest-state import and related code a few dozen times.
11621     * The downside is that we've got to repeat the switch, though this time
11622     * we use enmEvent since the probes are a subset of what DBGF does.
11623 */
11624 if (fDtrace1 || fDtrace2)
11625 {
11626 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11627 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11628 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11629 switch (enmEvent1)
11630 {
11631 /** @todo consider which extra parameters would be helpful for each probe. */
11632 case DBGFEVENT_END: break;
11633 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11634 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11635 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11636 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11637 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11638 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11639 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11640 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11641 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11642 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11643 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11644 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11645 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11646 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11647 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11648 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11649 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11650 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11651 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11652 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11653 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11654 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11659 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11660 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11661 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11662 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11663 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11664 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11665 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11666 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11667 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11668 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11669 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11670 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11671 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11672 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11673 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11674 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11675 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11676 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11677 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11678 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11679 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11680 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11681 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11682 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11683 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11684 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11685 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11686 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11687 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11688 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11689 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11690 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11691 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11692 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11693 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11694 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11695 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11696 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11697 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11698 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11699 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11700 }
11701 switch (enmEvent2)
11702 {
11703 /** @todo consider which extra parameters would be helpful for each probe. */
11704 case DBGFEVENT_END: break;
11705 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11706 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11707 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11708 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11715 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11716 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11717 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11718 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11719 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11720 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11721 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11722 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11724 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11725 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11726 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11727 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11728 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11729 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11730 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11731 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11732 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11733 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11734 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11735 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11736 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11737 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11738 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11739 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11740 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11741 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11742 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11743 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11744 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11745 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11746 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11747 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11748 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11749 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11750 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11751 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11752 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11753 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11754 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11755 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11756 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11757 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11758 }
11759 }
11760
11761 /*
11762     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11763 * the DBGF call will do a full check).
11764 *
11765 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11766     * Note! If we have two events, we prioritize the first, i.e. the instruction
11767 * one, in order to avoid event nesting.
11768 */
11769 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11770 if ( enmEvent1 != DBGFEVENT_END
11771 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11772 {
11773 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11774 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11775 if (rcStrict != VINF_SUCCESS)
11776 return rcStrict;
11777 }
11778 else if ( enmEvent2 != DBGFEVENT_END
11779 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11780 {
11781 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11782 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11783 if (rcStrict != VINF_SUCCESS)
11784 return rcStrict;
11785 }
11786
11787 return VINF_SUCCESS;
11788}
11789
11790
11791/**
11792 * Single-stepping VM-exit filtering.
11793 *
11794 * This preprocesses the VM-exit and decides whether we've stepped far
11795 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11796 * handling is performed.
11797 *
11798 * @returns Strict VBox status code (i.e. informational status codes too).
11799 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11800 * @param pVmxTransient The VMX-transient structure.
11801 * @param pDbgState The debug state.
11802 */
11803DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11804{
11805 /*
11806 * Expensive (saves context) generic dtrace VM-exit probe.
11807 */
11808 uint32_t const uExitReason = pVmxTransient->uExitReason;
11809 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11810 { /* more likely */ }
11811 else
11812 {
11813 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11814 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11815 AssertRC(rc);
11816 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11817 }
11818
11819#ifndef IN_NEM_DARWIN
11820 /*
11821 * Check for host NMI, just to get that out of the way.
11822 */
11823 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11824 { /* normally likely */ }
11825 else
11826 {
11827 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11828 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11829 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11830 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11831 }
11832#endif
11833
11834 /*
11835     * Check for a single-stepping event if we're stepping.
11836 */
11837 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11838 {
11839 switch (uExitReason)
11840 {
11841 case VMX_EXIT_MTF:
11842 return vmxHCExitMtf(pVCpu, pVmxTransient);
11843
11844 /* Various events: */
11845 case VMX_EXIT_XCPT_OR_NMI:
11846 case VMX_EXIT_EXT_INT:
11847 case VMX_EXIT_TRIPLE_FAULT:
11848 case VMX_EXIT_INT_WINDOW:
11849 case VMX_EXIT_NMI_WINDOW:
11850 case VMX_EXIT_TASK_SWITCH:
11851 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11852 case VMX_EXIT_APIC_ACCESS:
11853 case VMX_EXIT_EPT_VIOLATION:
11854 case VMX_EXIT_EPT_MISCONFIG:
11855 case VMX_EXIT_PREEMPT_TIMER:
11856
11857 /* Instruction specific VM-exits: */
11858 case VMX_EXIT_CPUID:
11859 case VMX_EXIT_GETSEC:
11860 case VMX_EXIT_HLT:
11861 case VMX_EXIT_INVD:
11862 case VMX_EXIT_INVLPG:
11863 case VMX_EXIT_RDPMC:
11864 case VMX_EXIT_RDTSC:
11865 case VMX_EXIT_RSM:
11866 case VMX_EXIT_VMCALL:
11867 case VMX_EXIT_VMCLEAR:
11868 case VMX_EXIT_VMLAUNCH:
11869 case VMX_EXIT_VMPTRLD:
11870 case VMX_EXIT_VMPTRST:
11871 case VMX_EXIT_VMREAD:
11872 case VMX_EXIT_VMRESUME:
11873 case VMX_EXIT_VMWRITE:
11874 case VMX_EXIT_VMXOFF:
11875 case VMX_EXIT_VMXON:
11876 case VMX_EXIT_MOV_CRX:
11877 case VMX_EXIT_MOV_DRX:
11878 case VMX_EXIT_IO_INSTR:
11879 case VMX_EXIT_RDMSR:
11880 case VMX_EXIT_WRMSR:
11881 case VMX_EXIT_MWAIT:
11882 case VMX_EXIT_MONITOR:
11883 case VMX_EXIT_PAUSE:
11884 case VMX_EXIT_GDTR_IDTR_ACCESS:
11885 case VMX_EXIT_LDTR_TR_ACCESS:
11886 case VMX_EXIT_INVEPT:
11887 case VMX_EXIT_RDTSCP:
11888 case VMX_EXIT_INVVPID:
11889 case VMX_EXIT_WBINVD:
11890 case VMX_EXIT_XSETBV:
11891 case VMX_EXIT_RDRAND:
11892 case VMX_EXIT_INVPCID:
11893 case VMX_EXIT_VMFUNC:
11894 case VMX_EXIT_RDSEED:
11895 case VMX_EXIT_XSAVES:
11896 case VMX_EXIT_XRSTORS:
11897 {
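                /* For all the exits above, the step only counts as complete once RIP or CS
                   has actually changed from where the stepping started. */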
11898 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11899 AssertRCReturn(rc, rc);
11900 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11901 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11902 return VINF_EM_DBG_STEPPED;
11903 break;
11904 }
11905
11906 /* Errors and unexpected events: */
11907 case VMX_EXIT_INIT_SIGNAL:
11908 case VMX_EXIT_SIPI:
11909 case VMX_EXIT_IO_SMI:
11910 case VMX_EXIT_SMI:
11911 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11912 case VMX_EXIT_ERR_MSR_LOAD:
11913 case VMX_EXIT_ERR_MACHINE_CHECK:
11914 case VMX_EXIT_PML_FULL:
11915 case VMX_EXIT_VIRTUALIZED_EOI:
11916             case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
11917 break;
11918
11919 default:
11920 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11921 break;
11922 }
11923 }
11924
11925 /*
11926 * Check for debugger event breakpoints and dtrace probes.
11927 */
11928 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11929 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11930 {
11931 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11932 if (rcStrict != VINF_SUCCESS)
11933 return rcStrict;
11934 }
11935
11936 /*
11937 * Normal processing.
11938 */
11939#ifdef HMVMX_USE_FUNCTION_TABLE
11940 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11941#else
11942 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11943#endif
11944}
11945
11946/** @} */