VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@102020

Last change on this file since 102020 was 102020, checked in by vboxsync, 17 months ago

VMM: Nested VMX: bugref:10318 Discard unused VMCS launch-state bits and update 'VMCLEAR' VMCS launch-state bit to be 0.

1/* $Id: VMXAllTemplate.cpp.h 102020 2023-11-09 11:27:42Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
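/*
 * For illustration, a VM-exit handler that depends on the exit qualification and
 * instruction length having been fetched via vmxHCReadToTransient() could assert
 * this as follows (a minimal usage sketch, not lifted from a specific handler):
 *
 *     HMVMX_ASSERT_READ(pVmxTransient,   HMVMX_READ_EXIT_QUALIFICATION
 *                                      | HMVMX_READ_EXIT_INSTR_LEN);
 */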
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
95 * deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
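/*
 * For illustration, code that requires the whole HM-managed subset of the guest
 * context to have been imported (i.e. none of the HMVMX_CPUMCTX_EXTRN_ALL bits
 * may still be marked external) can assert it like this -- a minimal sketch:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 */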
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only data fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields. */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
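/*
 * For illustration, a sketch of how this table is typically consumed: deciding
 * which fields to expose through a shadow VMCS amounts to walking the array and
 * filtering (the filter shown here is a hypothetical placeholder):
 *
 *     for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFields); i++)
 *     {
 *         uint32_t const uVmcsField = g_aVmcsFields[i];
 *         // e.g. add uVmcsField to the VMREAD/VMWRITE bitmaps as appropriate
 *     }
 */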
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
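/*
 * For illustration, with HMVMX_USE_FUNCTION_TABLE defined the VM-exit dispatch
 * reduces to one indexed call along these lines (a sketch, assuming uExitReason
 * already holds the basic exit reason):
 *
 *     Assert(uExitReason <= VMX_EXIT_MAX);
 *     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 */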
674#endif /* HMVMX_USE_FUNCTION_TABLE */
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
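/*
 * For illustration, a strict + logging build can map a VM-instruction error to a
 * readable string roughly like this (a sketch; the bounds check mirrors
 * HMVMX_INSTR_ERROR_MAX above):
 *
 *     uint32_t uInstrError = 0;
 *     int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     Log(("VM-instruction error %u: %s\n", uInstrError,
 *          uInstrError <= HMVMX_INSTR_ERROR_MAX ? g_apszVmxInstrErrors[uInstrError] : "Unknown"));
 */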
709#endif /* VBOX_STRICT && LOG_ENABLED */
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
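/*
 * For illustration, the mask computed above is what lands in the CR0 guest/host
 * mask VMCS field; committing it would look roughly like this (a sketch assuming
 * the natural-width write wrapper VMX_VMCS_WRITE_NW is provided by the includer):
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */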
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because its behavior
769 * depends on CR3, which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
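/*
 * For illustration, forcing an additional \#GP intercept on the current VMCS is
 * a one-liner with the helpers above (usage sketch):
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */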
839
840
841/**
842 * Remove one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
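/*
 * For illustration, the counterpart of the add-helpers above: dropping the \#PF
 * intercept once nested paging makes it unnecessary could look like this (the
 * surrounding condition is illustrative only):
 *
 *     if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
 *         vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_PF);
 */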
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 return VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937}
938
939
940/**
941 * Clears the shadow VMCS specified by the VMCS info. object.
942 *
943 * @returns VBox status code.
944 * @param pVmcsInfo The VMCS info. object.
945 *
946 * @remarks Can be called with interrupts disabled.
947 */
948static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
949{
950 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
951 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
952
953 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
954 if (RT_SUCCESS(rc))
955 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
956 return rc;
957}
958
959
960/**
961 * Switches from and to the specified VMCSes.
962 *
963 * @returns VBox status code.
964 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
965 * @param pVmcsInfoTo The VMCS info. object we are switching to.
966 *
967 * @remarks Called with interrupts disabled.
968 */
969static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
970{
971 /*
972 * Clear the VMCS we are switching out if it has not already been cleared.
973 * This will sync any CPU internal data back to the VMCS.
974 */
975 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
976 {
977 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
978 if (RT_SUCCESS(rc))
979 {
980 /*
981 * The shadow VMCS, if any, would not be active at this point since we
982 * would have cleared it while importing the virtual hardware-virtualization
983 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
984 * clear the shadow VMCS here, just assert for safety.
985 */
986 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
987 }
988 else
989 return rc;
990 }
991
992 /*
993 * Clear the VMCS we are switching to if it has not already been cleared.
994 * This will initialize the VMCS launch state to "clear", which is required for loading it.
995 *
996 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
997 */
998 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
999 {
1000 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1001 if (RT_SUCCESS(rc))
1002 { /* likely */ }
1003 else
1004 return rc;
1005 }
1006
1007 /*
1008 * Finally, load the VMCS we are switching to.
1009 */
1010 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1011}
1012
1013
1014/**
1015 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1016 * caller.
1017 *
1018 * @returns VBox status code.
1019 * @param pVCpu The cross context virtual CPU structure.
1020 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1021 * true) or guest VMCS (pass false).
1022 */
1023static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1024{
1025 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1026 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1027
1028 PVMXVMCSINFO pVmcsInfoFrom;
1029 PVMXVMCSINFO pVmcsInfoTo;
1030 if (fSwitchToNstGstVmcs)
1031 {
1032 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1033 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1034 }
1035 else
1036 {
1037 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1038 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1039 }
1040
1041 /*
1042 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1043 * preemption hook code path acquires the current VMCS.
1044 */
1045 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1046
1047 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1048 if (RT_SUCCESS(rc))
1049 {
1050 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1051 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1052
1053 /*
1054 * If we are switching to a VMCS that was executed on a different host CPU or was
1055 * never executed before, flag that we need to export the host state before executing
1056 * guest/nested-guest code using hardware-assisted VMX.
1057 *
1058 * This could probably be done in a preemptible context since the preemption hook
1059 * will flag the necessary change in host context. However, since preemption is
1060 * already disabled and to avoid making assumptions about host specific code in
1061 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1062 * disabled.
1063 */
1064 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1065 { /* likely */ }
1066 else
1067 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1068
1069 ASMSetFlags(fEFlags);
1070
1071 /*
1072 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1073 * flag that we need to update the host MSR values there. Even if we decide in the
1074 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1075 * if its content differs, we would have to update the host MSRs anyway.
1076 */
1077 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1078 }
1079 else
1080 ASMSetFlags(fEFlags);
1081 return rc;
1082}
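/*
 * For illustration, the VMLAUNCH/VMRESUME emulation path would switch to the
 * nested-guest VMCS with the helper above and back again on the nested VM-exit;
 * a sketch of the calling convention only:
 *
 *     int const rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true); // to nested-guest VMCS
 *     AssertRCReturn(rc, rc);
 */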
1083
1084#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1085#ifdef VBOX_STRICT
1086
1087/**
1088 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1089 * transient structure.
1090 *
1091 * @param pVCpu The cross context virtual CPU structure.
1092 * @param pVmxTransient The VMX-transient structure.
1093 */
1094DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1095{
1096 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1097 AssertRC(rc);
1098}
1099
1100
1101/**
1102 * Reads the VM-entry exception error code field from the VMCS into
1103 * the VMX transient structure.
1104 *
1105 * @param pVCpu The cross context virtual CPU structure.
1106 * @param pVmxTransient The VMX-transient structure.
1107 */
1108DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1109{
1110 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1111 AssertRC(rc);
1112}
1113
1114
1115/**
1116 * Reads the VM-entry instruction length field from the VMCS into
1117 * the VMX transient structure.
1118 *
1119 * @param pVCpu The cross context virtual CPU structure.
1120 * @param pVmxTransient The VMX-transient structure.
1121 */
1122DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1123{
1124 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1125 AssertRC(rc);
1126}
1127
1128#endif /* VBOX_STRICT */
1129
1130
1131/**
1132 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1133 *
1134 * Don't call directly unless it's likely that some or all of the fields
1135 * given in @a a_fReadMask have already been read.
1136 *
1137 * @tparam a_fReadMask The fields to read.
1138 * @param pVCpu The cross context virtual CPU structure.
1139 * @param pVmxTransient The VMX-transient structure.
1140 */
1141template<uint32_t const a_fReadMask>
1142static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1143{
1144 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1145 | HMVMX_READ_EXIT_INSTR_LEN
1146 | HMVMX_READ_EXIT_INSTR_INFO
1147 | HMVMX_READ_IDT_VECTORING_INFO
1148 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1149 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1150 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1151 | HMVMX_READ_GUEST_LINEAR_ADDR
1152 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1153 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1154 )) == 0);
1155
1156 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1157 {
1158 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1159
1160 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1161 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1162 {
1163 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1164 AssertRC(rc);
1165 }
1166 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1167 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1168 {
1169 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1170 AssertRC(rc);
1171 }
1172 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1173 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1174 {
1175 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1176 AssertRC(rc);
1177 }
1178 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1179 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1180 {
1181 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1182 AssertRC(rc);
1183 }
1184 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1185 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1186 {
1187 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1188 AssertRC(rc);
1189 }
1190 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1191 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1192 {
1193 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1194 AssertRC(rc);
1195 }
1196 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1197 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1198 {
1199 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1200 AssertRC(rc);
1201 }
1202 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1203 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1204 {
1205 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1206 AssertRC(rc);
1207 }
1208 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1209 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1210 {
1211 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1212 AssertRC(rc);
1213 }
1214 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1215 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1216 {
1217 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1218 AssertRC(rc);
1219 }
1220
1221 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1222 }
1223}
1224
1225
1226/**
1227 * Reads VMCS fields into the VMXTRANSIENT structure.
1228 *
1229 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1230 * generating an optimized read sequence without any conditionals in between
1231 * in non-strict builds.
1232 *
1233 * @tparam a_fReadMask The fields to read. One or more of the
1234 * HMVMX_READ_XXX fields ORed together.
1235 * @param pVCpu The cross context virtual CPU structure.
1236 * @param pVmxTransient The VMX-transient structure.
1237 */
1238template<uint32_t const a_fReadMask>
1239DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1240{
1241 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1242 | HMVMX_READ_EXIT_INSTR_LEN
1243 | HMVMX_READ_EXIT_INSTR_INFO
1244 | HMVMX_READ_IDT_VECTORING_INFO
1245 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1246 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1247 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1248 | HMVMX_READ_GUEST_LINEAR_ADDR
1249 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1250 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1251 )) == 0);
1252
1253 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1254 {
1255 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1256 {
1257 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1258 AssertRC(rc);
1259 }
1260 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1261 {
1262 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1263 AssertRC(rc);
1264 }
1265 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1266 {
1267 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1268 AssertRC(rc);
1269 }
1270 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1271 {
1272 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1273 AssertRC(rc);
1274 }
1275 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1276 {
1277 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1278 AssertRC(rc);
1279 }
1280 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1281 {
1282 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1283 AssertRC(rc);
1284 }
1285 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1286 {
1287 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1288 AssertRC(rc);
1289 }
1290 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1291 {
1292 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1293 AssertRC(rc);
1294 }
1295 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1296 {
1297 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1298 AssertRC(rc);
1299 }
1300 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1301 {
1302 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1303 AssertRC(rc);
1304 }
1305
1306 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1307 }
1308 else
1309 {
1310 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1311 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1312 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1313 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1314 }
1315}
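/*
 * For illustration, exit handlers invoke this template with exactly the set of
 * fields they are about to use, e.g. (a typical pattern rather than a quote from
 * a specific handler):
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 */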
1316
1317
1318#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1319/**
1320 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1321 *
1322 * @param pVCpu The cross context virtual CPU structure.
1323 * @param pVmxTransient The VMX-transient structure.
1324 */
1325static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1326{
1327 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1328 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1329 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1330 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1334 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1335 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1336 AssertRC(rc);
1337 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1338 | HMVMX_READ_EXIT_INSTR_LEN
1339 | HMVMX_READ_EXIT_INSTR_INFO
1340 | HMVMX_READ_IDT_VECTORING_INFO
1341 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1342 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1343 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1344 | HMVMX_READ_GUEST_LINEAR_ADDR
1345 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1346}
1347#endif
1348
1349/**
1350 * Verifies that our cached values of the VMCS fields are all consistent with
1351 * what's actually present in the VMCS.
1352 *
1353 * @returns VBox status code.
1354 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1355 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1356 * VMCS content. HMCPU error-field is
1357 * updated, see VMX_VCI_XXX.
1358 * @param pVCpu The cross context virtual CPU structure.
1359 * @param pVmcsInfo The VMCS info. object.
1360 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1361 */
1362static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1363{
1364 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1365
1366 uint32_t u32Val;
1367 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1368 AssertRC(rc);
1369 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1370 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1371 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1372 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1373
1374 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1375 AssertRC(rc);
1376 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1377 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1378 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1379 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1380
1381 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1382 AssertRC(rc);
1383 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1384 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1385 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1386 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1387
1388 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1389 AssertRC(rc);
1390 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1391 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1392 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1393 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1394
1395 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1396 {
1397 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1398 AssertRC(rc);
1399 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1400 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1401 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1402 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1403 }
1404
1405 uint64_t u64Val;
1406 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1407 {
1408 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1409 AssertRC(rc);
1410 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1411 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1412 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1413 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1414 }
1415
1416 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1417 AssertRC(rc);
1418 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1419 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1420 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1421 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1422
1423 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1426 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429
1430 NOREF(pcszVmcs);
1431 return VINF_SUCCESS;
1432}
1433
1434
1435/**
1436 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1437 * VMCS.
1438 *
1439 * This is typically required when the guest changes paging mode.
1440 *
1441 * @returns VBox status code.
1442 * @param pVCpu The cross context virtual CPU structure.
1443 * @param pVmxTransient The VMX-transient structure.
1444 *
1445 * @remarks Requires EFER.
1446 * @remarks No-long-jump zone!!!
1447 */
1448static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1449{
1450 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1451 {
1452 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1453 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1454
1455 /*
1456 * VM-entry controls.
1457 */
1458 {
1459 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1460 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1461
1462 /*
1463 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1464 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1465 *
1466 * For nested-guests, this is a mandatory VM-entry control. It's also
1467 * required because we do not want to leak host bits to the nested-guest.
1468 */
1469 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1470
1471 /*
1472 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1473 *
1474 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1475 * required to get the nested-guest working with hardware-assisted VMX execution.
1476 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1477 * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1478 * here rather than while merging the guest VMCS controls.
1479 */
1480 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1481 {
1482 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1483 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1484 }
1485 else
1486 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1487
1488 /*
1489 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1490 *
1491 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1492 * regardless of whether the nested-guest VMCS specifies it because we are free to
1493 * load whatever MSRs we require and we do not need to modify the guest visible copy
1494 * of the VM-entry MSR load area.
1495 */
1496 if ( g_fHmVmxSupportsVmcsEfer
1497#ifndef IN_NEM_DARWIN
1498 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1499#endif
1500 )
1501 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1502 else
1503 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1504
1505 /*
1506 * The following should -not- be set (since we're not in SMM mode):
1507 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1508 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1509 */
1510
1511 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1512 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1513
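/* A required-1 bit (allowed0) that is not also permitted by the allowed-1 mask means the CPU
   cannot provide the feature combination we need; (fVal & fZap) == fVal checks exactly that. */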
1514 if ((fVal & fZap) == fVal)
1515 { /* likely */ }
1516 else
1517 {
1518 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1519 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1520 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1521 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1522 }
1523
1524 /* Commit it to the VMCS. */
1525 if (pVmcsInfo->u32EntryCtls != fVal)
1526 {
1527 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1528 AssertRC(rc);
1529 pVmcsInfo->u32EntryCtls = fVal;
1530 }
1531 }
1532
1533 /*
1534 * VM-exit controls.
1535 */
1536 {
1537 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1538 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1539
1540 /*
1541 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1542 * supported the 1-setting of this bit.
1543 *
1544 * For nested-guests, we set the "save debug controls" control, as the converse
1545 * "load debug controls" control is mandatory for nested-guests anyway.
1546 */
1547 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1548
1549 /*
1550 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1551 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1552 * host EFER.LMA and EFER.LME bits to this value. See the assertion in
1553 * vmxHCExportHostMsrs().
1554 *
1555 * For nested-guests, we always set this bit as we do not support 32-bit
1556 * hosts.
1557 */
1558 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1559
1560#ifndef IN_NEM_DARWIN
1561 /*
1562 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1563 *
1564 * For nested-guests, we should use the "save IA32_EFER" control if we also
1565 * used the "load IA32_EFER" control while exporting VM-entry controls.
1566 */
1567 if ( g_fHmVmxSupportsVmcsEfer
1568 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1569 {
1570 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1571 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1572 }
1573#endif
1574
1575 /*
1576 * Enable saving of the VMX-preemption timer value on VM-exit.
1577 * For nested-guests, currently not exposed/used.
1578 */
1579 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1580 * the timer value. */
1581 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1582 {
1583 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1584 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1585 }
1586
1587 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1588 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1589
1590 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1591 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1592 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1593
1594 if ((fVal & fZap) == fVal)
1595 { /* likely */ }
1596 else
1597 {
1598 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1599 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1600 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1601 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1602 }
1603
1604 /* Commit it to the VMCS. */
1605 if (pVmcsInfo->u32ExitCtls != fVal)
1606 {
1607 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1608 AssertRC(rc);
1609 pVmcsInfo->u32ExitCtls = fVal;
1610 }
1611 }
1612
1613 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1614 }
1615 return VINF_SUCCESS;
1616}
1617
1618
1619/**
1620 * Sets the TPR threshold in the VMCS.
1621 *
1622 * @param pVCpu The cross context virtual CPU structure.
1623 * @param pVmcsInfo The VMCS info. object.
1624 * @param u32TprThreshold The TPR threshold (task-priority class only).
1625 */
1626DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1627{
1628 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1629 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1630 RT_NOREF(pVmcsInfo);
1631 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1632 AssertRC(rc);
1633}
1634
1635
1636/**
1637 * Exports the guest APIC TPR state into the VMCS.
1638 *
1639 * @param pVCpu The cross context virtual CPU structure.
1640 * @param pVmxTransient The VMX-transient structure.
1641 *
1642 * @remarks No-long-jump zone!!!
1643 */
1644static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1645{
1646 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1647 {
1648 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1649
1650 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1651 if (!pVmxTransient->fIsNestedGuest)
1652 {
1653 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1654 && APICIsEnabled(pVCpu))
1655 {
1656 /*
1657 * Setup TPR shadowing.
1658 */
1659 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1660 {
1661 bool fPendingIntr = false;
1662 uint8_t u8Tpr = 0;
1663 uint8_t u8PendingIntr = 0;
1664 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1665 AssertRC(rc);
1666
1667 /*
1668 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1669 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1670 * priority of the pending interrupt so we can deliver the interrupt. If there
1671 * are no interrupts pending, set threshold to 0 to not cause any
1672 * TPR-below-threshold VM-exits.
1673 */
1674 uint32_t u32TprThreshold = 0;
1675 if (fPendingIntr)
1676 {
1677 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1678 (which is the Task-Priority Class). */
1679 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1680 const uint8_t u8TprPriority = u8Tpr >> 4;
1681 if (u8PendingPriority <= u8TprPriority)
1682 u32TprThreshold = u8PendingPriority;
1683 }
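/* Illustrative example: guest TPR=0x50 (priority class 5) with pending vector 0x41 (class 4):
   4 <= 5, so the threshold becomes 4 and a TPR-below-threshold VM-exit fires as soon as the
   guest lowers its TPR below class 4, letting us deliver the pending interrupt. */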
1684
1685 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1686 }
1687 }
1688 }
1689 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1690 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1691 }
1692}
1693
1694
1695/**
1696 * Gets the guest interruptibility-state and updates related internal eflags
1697 * inhibition state.
1698 *
1699 * @returns Guest's interruptibility-state.
1700 * @param pVCpu The cross context virtual CPU structure.
1701 *
1702 * @remarks No-long-jump zone!!!
1703 */
1704static uint32_t vmxHCGetGuestIntrStateWithUpdate(PVMCPUCC pVCpu)
1705{
1706 uint32_t fIntrState;
1707
1708 /*
1709 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1710 */
1711 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1712 fIntrState = 0;
1713 else
1714 {
1715 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1717
1718 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1719 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1720 else
1721 {
1722 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1723
1724 /* Block-by-STI must not be set when interrupts are disabled. */
1725 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1726 }
1727 }
1728
1729 /*
1730 * Check if we should inhibit NMI delivery.
1731 */
1732 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1733 { /* likely */ }
1734 else
1735 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1736
1737 /*
1738 * Validate.
1739 */
1740 /* We don't support block-by-SMI yet. */
1741 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1742
1743 return fIntrState;
1744}
1745
1746
1747/**
1748 * Exports the exception intercepts required for guest execution in the VMCS.
1749 *
1750 * @param pVCpu The cross context virtual CPU structure.
1751 * @param pVmxTransient The VMX-transient structure.
1752 *
1753 * @remarks No-long-jump zone!!!
1754 */
1755static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1756{
1757 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1758 {
1759 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1760 if ( !pVmxTransient->fIsNestedGuest
1761 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1762 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1763 else
1764 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765
1766 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1767 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1768 }
1769}
1770
1771
1772/**
1773 * Exports the guest's RIP into the guest-state area in the VMCS.
1774 *
1775 * @param pVCpu The cross context virtual CPU structure.
1776 *
1777 * @remarks No-long-jump zone!!!
1778 */
1779static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1780{
1781 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1782 {
1783 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1784
1785 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1786 AssertRC(rc);
1787
1788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1789 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1790 }
1791}
1792
1793
1794/**
1795 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1796 *
1797 * @param pVCpu The cross context virtual CPU structure.
1798 * @param pVmxTransient The VMX-transient structure.
1799 *
1800 * @remarks No-long-jump zone!!!
1801 */
1802static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1803{
1804 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1805 {
1806 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1807
1808 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32 bits
1809 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is no
1810 need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1811 Use 32-bit VMWRITE. */
1812 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1813 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1814 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1815
1816#ifndef IN_NEM_DARWIN
1817 /*
1818 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1819 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1820 * can run the real-mode guest code under Virtual 8086 mode.
1821 */
1822 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1823 if (pVmcsInfo->RealMode.fRealOnV86Active)
1824 {
1825 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1826 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1827 Assert(!pVmxTransient->fIsNestedGuest);
1828 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1829 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1830 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1831 }
1832#else
1833 RT_NOREF(pVmxTransient);
1834#endif
1835
1836 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1837 AssertRC(rc);
1838
1839 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1840 Log4Func(("eflags=%#RX32\n", fEFlags));
1841 }
1842}
1843
1844
1845#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1846/**
1847 * Copies the nested-guest VMCS to the shadow VMCS.
1848 *
1849 * @returns VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure.
1851 * @param pVmcsInfo The VMCS info. object.
1852 *
1853 * @remarks No-long-jump zone!!!
1854 */
1855static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1856{
1857 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1858 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1859
1860 /*
1861 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1862 * current VMCS, as we may try saving guest lazy MSRs.
1863 *
1864 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1865 * calling the import VMCS code which is currently performing the guest MSR reads
1866 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1867 * and the rest of the VMX leave session machinery.
1868 */
1869 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1870
1871 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1872 if (RT_SUCCESS(rc))
1873 {
1874 /*
1875 * Copy all guest read/write VMCS fields.
1876 *
1877 * We don't check for VMWRITE failures here for performance reasons and
1878 * because they are not expected to fail, barring irrecoverable conditions
1879 * like hardware errors.
1880 */
1881 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1882 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1883 {
1884 uint64_t u64Val;
1885 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1886 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1887 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1888 }
1889
1890 /*
1891 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1892 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1893 */
1894 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1895 {
1896 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1897 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1898 {
1899 uint64_t u64Val;
1900 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1901 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1902 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1903 }
1904 }
1905
1906 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1907 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1908 }
1909
1910 ASMSetFlags(fEFlags);
1911 return rc;
1912}
1913
1914
1915/**
1916 * Copies the shadow VMCS to the nested-guest VMCS.
1917 *
1918 * @returns VBox status code.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 * @param pVmcsInfo The VMCS info. object.
1921 *
1922 * @remarks Called with interrupts disabled.
1923 */
1924static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1925{
1926 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1927 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1928 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1929
1930 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1931 if (RT_SUCCESS(rc))
1932 {
1933 /*
1934 * Copy guest read/write fields from the shadow VMCS.
1935 * Guest read-only fields cannot be modified, so no need to copy them.
1936 *
1937 * We don't check for VMREAD failures here for performance reasons and
1938 * because they are not expected to fail, barring irrecoverable conditions
1939 * like hardware errors.
1940 */
1941 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1942 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1943 {
1944 uint64_t u64Val;
1945 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1946 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1947 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1948 }
1949
1950 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1951 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1952 }
1953 return rc;
1954}
1955
1956
1957/**
1958 * Enables VMCS shadowing for the given VMCS info. object.
1959 *
1960 * @param pVCpu The cross context virtual CPU structure.
1961 * @param pVmcsInfo The VMCS info. object.
1962 *
1963 * @remarks No-long-jump zone!!!
1964 */
1965static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1966{
1967 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1968 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1969 {
1970 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1971 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1972 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1973 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1974 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1975 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1976 Log4Func(("Enabled\n"));
1977 }
1978}
1979
1980
1981/**
1982 * Disables VMCS shadowing for the given VMCS info. object.
1983 *
1984 * @param pVCpu The cross context virtual CPU structure.
1985 * @param pVmcsInfo The VMCS info. object.
1986 *
1987 * @remarks No-long-jump zone!!!
1988 */
1989static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1990{
1991 /*
1992 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1993 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1994 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1995 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1996 *
1997 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
1998 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
1999 */
2000 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2001 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2002 {
2003 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2004 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2005 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2006 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2007 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2008 Log4Func(("Disabled\n"));
2009 }
2010}
2011#endif
2012
2013
2014/**
2015 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2016 *
2017 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2018 * sharing FPU-related CR0 bits between the guest and host.
2019 *
2020 * @returns VBox status code.
2021 * @param pVCpu The cross context virtual CPU structure.
2022 * @param pVmxTransient The VMX-transient structure.
2023 *
2024 * @remarks No-long-jump zone!!!
2025 */
2026static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2027{
2028 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2029 {
2030 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2031 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2032
2033 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2034 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2035 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2036 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2037 else
2038 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
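/* Note (illustrative): on typical VT-x hardware IA32_VMX_CR0_FIXED0 requires PE, NE and PG to be
   set while IA32_VMX_CR0_FIXED1 permits all bits; with unrestricted guest execution the CPU relaxes
   the PE/PG requirement, which is what the adjustment above accounts for. The actual masks are
   whatever g_HmMsrs reports. */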
2039
2040 if (!pVmxTransient->fIsNestedGuest)
2041 {
2042 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2043 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2044 uint64_t const u64ShadowCr0 = u64GuestCr0;
2045 Assert(!RT_HI_U32(u64GuestCr0));
2046
2047 /*
2048 * Setup VT-x's view of the guest CR0.
2049 */
2050 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2051 if (VM_IS_VMX_NESTED_PAGING(pVM))
2052 {
2053#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2054 if (CPUMIsGuestPagingEnabled(pVCpu))
2055 {
2056 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2057 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2058 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2059 }
2060 else
2061 {
2062 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2063 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2064 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2065 }
2066
2067 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2068 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2069 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2070#endif
2071 }
2072 else
2073 {
2074 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2075 u64GuestCr0 |= X86_CR0_WP;
2076 }
2077
2078 /*
2079 * Guest FPU bits.
2080 *
2081 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2082 * using CR0.TS.
2083 *
2084 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2085 * set on the first CPUs to support VT-x; no mention is made of it with regards to UX in VM-entry checks.
2086 */
2087 u64GuestCr0 |= X86_CR0_NE;
2088
2089 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2090 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2091
2092 /*
2093 * Update exception intercepts.
2094 */
2095 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2096#ifndef IN_NEM_DARWIN
2097 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2098 {
2099 Assert(PDMVmmDevHeapIsEnabled(pVM));
2100 Assert(pVM->hm.s.vmx.pRealModeTSS);
2101 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2102 }
2103 else
2104#endif
2105 {
2106 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2107 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2108 if (fInterceptMF)
2109 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2110 }
2111
2112 /* Additional intercepts for debugging, define these yourself explicitly. */
2113#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2114 uXcptBitmap |= 0
2115 | RT_BIT(X86_XCPT_BP)
2116 | RT_BIT(X86_XCPT_DE)
2117 | RT_BIT(X86_XCPT_NM)
2118 | RT_BIT(X86_XCPT_TS)
2119 | RT_BIT(X86_XCPT_UD)
2120 | RT_BIT(X86_XCPT_NP)
2121 | RT_BIT(X86_XCPT_SS)
2122 | RT_BIT(X86_XCPT_GP)
2123 | RT_BIT(X86_XCPT_PF)
2124 | RT_BIT(X86_XCPT_MF)
2125 ;
2126#elif defined(HMVMX_ALWAYS_TRAP_PF)
2127 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2128#endif
2129 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2130 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2131 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2133 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2134
2135 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2136 u64GuestCr0 |= fSetCr0;
2137 u64GuestCr0 &= fZapCr0;
2138 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2139
2140 Assert(!RT_HI_U32(u64GuestCr0));
2141 Assert(u64GuestCr0 & X86_CR0_NE);
2142
2143 /* Commit the CR0 and related fields to the guest VMCS. */
2144 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2145 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2146 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2147 {
2148 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2149 AssertRC(rc);
2150 }
2151 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2152 {
2153 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2154 AssertRC(rc);
2155 }
2156
2157 /* Update our caches. */
2158 pVmcsInfo->u32ProcCtls = uProcCtls;
2159 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2160
2161 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2162 }
2163 else
2164 {
2165 /*
2166 * With nested-guests, we may have extended the guest/host mask here since we
2167 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2168 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2169 * originally supplied. We must copy those bits from the nested-guest CR0 into
2170 * the nested-guest CR0 read-shadow.
2171 */
2172 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2173 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2174 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2175
2176 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2177 u64GuestCr0 |= fSetCr0;
2178 u64GuestCr0 &= fZapCr0;
2179 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2180
2181 Assert(!RT_HI_U32(u64GuestCr0));
2182 Assert(u64GuestCr0 & X86_CR0_NE);
2183
2184 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2185 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2186 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2187
2188 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2189 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2190 }
2191
2192 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2193 }
2194
2195 return VINF_SUCCESS;
2196}
2197
2198
2199/**
2200 * Exports the guest control registers (CR3, CR4) into the guest-state area
2201 * in the VMCS.
2202 *
2203 * @returns VBox strict status code.
2204 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2205 * without unrestricted guest access and the VMMDev is not presently
2206 * mapped (e.g. EFI32).
2207 *
2208 * @param pVCpu The cross context virtual CPU structure.
2209 * @param pVmxTransient The VMX-transient structure.
2210 *
2211 * @remarks No-long-jump zone!!!
2212 */
2213static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2214{
2215 int rc = VINF_SUCCESS;
2216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2217
2218 /*
2219 * Guest CR2.
2220 * It's always loaded in the assembler code. Nothing to do here.
2221 */
2222
2223 /*
2224 * Guest CR3.
2225 */
2226 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2227 {
2228 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2229
2230 if (VM_IS_VMX_NESTED_PAGING(pVM))
2231 {
2232#ifndef IN_NEM_DARWIN
2233 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2234 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2235
2236 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2237 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2238 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2239 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2240
2241 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2242 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2243 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
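/* Illustrative example: an EPT PML4 table at host-physical 0x123456000 combined with the WB
   memory type (6, bits 2:0) and a 4-level page walk (value 3, bits 5:3) gives an EPTP of
   0x12345601e, which satisfies the page-walk-length check asserted below. */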
2244
2245 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2246 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2247 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2248 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2249 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2250 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2251 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252
2253 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2254 AssertRC(rc);
2255#endif
2256
2257 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2258 uint64_t u64GuestCr3 = pCtx->cr3;
2259 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2260 || CPUMIsGuestPagingEnabledEx(pCtx))
2261 {
2262 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2263 if (CPUMIsGuestInPAEModeEx(pCtx))
2264 {
2265 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2266 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2269 }
2270
2271 /*
2272 * With nested paging, the guest's view of its CR3 is unblemished when the guest
2273 * is using paging, or when we have unrestricted guest execution to handle the
2274 * guest while it's not using paging.
2275 */
2276 }
2277#ifndef IN_NEM_DARWIN
2278 else
2279 {
2280 /*
2281 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2282 * thinks it accesses physical memory directly, we use our identity-mapped
2283 * page table to map guest-linear to guest-physical addresses. EPT takes care
2284 * of translating it to host-physical addresses.
2285 */
2286 RTGCPHYS GCPhys;
2287 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2288
2289 /* We obtain it here every time as the guest could have relocated this PCI region. */
2290 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2291 if (RT_SUCCESS(rc))
2292 { /* likely */ }
2293 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2294 {
2295 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2296 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2297 }
2298 else
2299 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2300
2301 u64GuestCr3 = GCPhys;
2302 }
2303#endif
2304
2305 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2306 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2307 AssertRC(rc);
2308 }
2309 else
2310 {
2311 Assert(!pVmxTransient->fIsNestedGuest);
2312 /* Non-nested paging case, just use the hypervisor's CR3. */
2313 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2314
2315 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2316 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2317 AssertRC(rc);
2318 }
2319
2320 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2321 }
2322
2323 /*
2324 * Guest CR4.
2325 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2326 */
2327 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2328 {
2329 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2330 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2331
2332 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2333 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2334
2335 /*
2336 * With nested-guests, we may have extended the guest/host mask here (since we
2337 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means the
2338 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2339 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2340 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2341 */
2342 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2343 uint64_t u64GuestCr4 = pCtx->cr4;
2344 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2345 ? pCtx->cr4
2346 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2347 Assert(!RT_HI_U32(u64GuestCr4));
2348
2349#ifndef IN_NEM_DARWIN
2350 /*
2351 * Setup VT-x's view of the guest CR4.
2352 *
2353 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2354 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2355 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2356 *
2357 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2358 */
2359 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2360 {
2361 Assert(pVM->hm.s.vmx.pRealModeTSS);
2362 Assert(PDMVmmDevHeapIsEnabled(pVM));
2363 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2364 }
2365#endif
2366
2367 if (VM_IS_VMX_NESTED_PAGING(pVM))
2368 {
2369 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2370 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2371 {
2372 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2373 u64GuestCr4 |= X86_CR4_PSE;
2374 /* Our identity mapping is a 32-bit page directory. */
2375 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2376 }
2377 /* else use guest CR4. */
2378 }
2379 else
2380 {
2381 Assert(!pVmxTransient->fIsNestedGuest);
2382
2383 /*
2384 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2385 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2386 */
2387 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2388 {
2389 case PGMMODE_REAL: /* Real-mode. */
2390 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2391 case PGMMODE_32_BIT: /* 32-bit paging. */
2392 {
2393 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2394 break;
2395 }
2396
2397 case PGMMODE_PAE: /* PAE paging. */
2398 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2399 {
2400 u64GuestCr4 |= X86_CR4_PAE;
2401 break;
2402 }
2403
2404 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2405 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2406 {
2407#ifdef VBOX_WITH_64_BITS_GUESTS
2408 /* For our assumption in vmxHCShouldSwapEferMsr. */
2409 Assert(u64GuestCr4 & X86_CR4_PAE);
2410 break;
2411#endif
2412 }
2413 default:
2414 AssertFailed();
2415 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2416 }
2417 }
2418
2419 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2420 u64GuestCr4 |= fSetCr4;
2421 u64GuestCr4 &= fZapCr4;
2422
2423 Assert(!RT_HI_U32(u64GuestCr4));
2424 Assert(u64GuestCr4 & X86_CR4_VMXE);
2425
2426 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2427 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2428 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2429
2430#ifndef IN_NEM_DARWIN
2431 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2432 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2433 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2434 {
2435 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2436 hmR0VmxUpdateStartVmFunction(pVCpu);
2437 }
2438#endif
2439
2440 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2441
2442 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2443 }
2444 return rc;
2445}
2446
2447
2448#ifdef VBOX_STRICT
2449/**
2450 * Strict function to validate segment registers.
2451 *
2452 * @param pVCpu The cross context virtual CPU structure.
2453 * @param pVmcsInfo The VMCS info. object.
2454 *
2455 * @remarks Will import guest CR0 on strict builds during validation of
2456 * segments.
2457 */
2458static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2459{
2460 /*
2461 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2462 *
2463 * The reason we check for attribute value 0 in this function, and not just the unusable bit,
2464 * is that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2465 * unusable bit and doesn't change the guest-context value.
2466 */
2467 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2468 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2469 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2470 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2471 && ( !CPUMIsGuestInRealModeEx(pCtx)
2472 && !CPUMIsGuestInV86ModeEx(pCtx)))
2473 {
2474 /* Protected mode checks */
2475 /* CS */
2476 Assert(pCtx->cs.Attr.n.u1Present);
2477 Assert(!(pCtx->cs.Attr.u & 0xf00));
2478 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2479 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2480 || !(pCtx->cs.Attr.n.u1Granularity));
2481 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2482 || (pCtx->cs.Attr.n.u1Granularity));
2483 /* CS cannot be loaded with NULL in protected mode. */
2484 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2485 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2486 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2487 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2488 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2489 else
2490 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2491 /* SS */
2492 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2493 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2494 if ( !(pCtx->cr0 & X86_CR0_PE)
2495 || pCtx->cs.Attr.n.u4Type == 3)
2496 {
2497 Assert(!pCtx->ss.Attr.n.u2Dpl);
2498 }
2499 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2500 {
2501 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2502 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2503 Assert(pCtx->ss.Attr.n.u1Present);
2504 Assert(!(pCtx->ss.Attr.u & 0xf00));
2505 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2506 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2507 || !(pCtx->ss.Attr.n.u1Granularity));
2508 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2509 || (pCtx->ss.Attr.n.u1Granularity));
2510 }
2511 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2512 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2513 {
2514 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2515 Assert(pCtx->ds.Attr.n.u1Present);
2516 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2517 Assert(!(pCtx->ds.Attr.u & 0xf00));
2518 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2519 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2520 || !(pCtx->ds.Attr.n.u1Granularity));
2521 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2522 || (pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2524 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2525 }
2526 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2527 {
2528 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2529 Assert(pCtx->es.Attr.n.u1Present);
2530 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2531 Assert(!(pCtx->es.Attr.u & 0xf00));
2532 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2533 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2534 || !(pCtx->es.Attr.n.u1Granularity));
2535 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2536 || (pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2538 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2539 }
2540 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2541 {
2542 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2543 Assert(pCtx->fs.Attr.n.u1Present);
2544 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2545 Assert(!(pCtx->fs.Attr.u & 0xf00));
2546 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2547 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2548 || !(pCtx->fs.Attr.n.u1Granularity));
2549 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2550 || (pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2552 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2553 }
2554 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2555 {
2556 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2557 Assert(pCtx->gs.Attr.n.u1Present);
2558 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2559 Assert(!(pCtx->gs.Attr.u & 0xf00));
2560 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2561 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2562 || !(pCtx->gs.Attr.n.u1Granularity));
2563 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2564 || (pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2566 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2567 }
2568 /* 64-bit capable CPUs. */
2569 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2570 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2571 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2572 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2573 }
2574 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2575 || ( CPUMIsGuestInRealModeEx(pCtx)
2576 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2577 {
2578 /* Real and v86 mode checks. */
2579 /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2580 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2581#ifndef IN_NEM_DARWIN
2582 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2583 {
2584 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2585 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2586 }
2587 else
2588#endif
2589 {
2590 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2591 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2592 }
2593
2594 /* CS */
2595 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2596 Assert(pCtx->cs.u32Limit == 0xffff);
2597 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2598 /* SS */
2599 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2600 Assert(pCtx->ss.u32Limit == 0xffff);
2601 Assert(u32SSAttr == 0xf3);
2602 /* DS */
2603 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2604 Assert(pCtx->ds.u32Limit == 0xffff);
2605 Assert(u32DSAttr == 0xf3);
2606 /* ES */
2607 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2608 Assert(pCtx->es.u32Limit == 0xffff);
2609 Assert(u32ESAttr == 0xf3);
2610 /* FS */
2611 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2612 Assert(pCtx->fs.u32Limit == 0xffff);
2613 Assert(u32FSAttr == 0xf3);
2614 /* GS */
2615 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2616 Assert(pCtx->gs.u32Limit == 0xffff);
2617 Assert(u32GSAttr == 0xf3);
2618 /* 64-bit capable CPUs. */
2619 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2620 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2621 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2622 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2623 }
2624}
2625#endif /* VBOX_STRICT */
2626
2627
2628/**
2629 * Exports a guest segment register into the guest-state area in the VMCS.
2630 *
2631 * @returns VBox status code.
2632 * @param pVCpu The cross context virtual CPU structure.
2633 * @param pVmcsInfo The VMCS info. object.
2634 * @param iSegReg The segment register number (X86_SREG_XXX).
2635 * @param pSelReg Pointer to the segment selector.
2636 *
2637 * @remarks No-long-jump zone!!!
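*
* A typical call, as done for CS further below in vmxHCExportGuestSegRegsXdtr():
* @code
*     rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
* @endcode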
2638 */
2639static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2640{
2641 Assert(iSegReg < X86_SREG_COUNT);
2642
2643 uint32_t u32Access = pSelReg->Attr.u;
2644#ifndef IN_NEM_DARWIN
2645 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2646#endif
2647 {
2648 /*
2649 * The way to differentiate whether this really is a null selector or just a selector
2650 * loaded with 0 in real-mode is by using the segment attributes. A selector loaded in
2651 * real-mode with the value 0 is valid and usable in protected-mode, and we should -not-
2652 * mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
2653 * loaded in protected-mode have their attributes set to 0.
2654 */
2655 if (u32Access)
2656 { }
2657 else
2658 u32Access = X86DESCATTR_UNUSABLE;
2659 }
2660#ifndef IN_NEM_DARWIN
2661 else
2662 {
2663 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2664 u32Access = 0xf3;
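/* 0xf3 decodes as: present (bit 7), DPL=3 (bits 6:5), code/data descriptor (bit 4), type 3
   (accessed read/write data segment) - the access rights virtual-8086 mode execution expects. */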
2665 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2666 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2667 RT_NOREF_PV(pVCpu);
2668 }
2669#else
2670 RT_NOREF(pVmcsInfo);
2671#endif
2672
2673 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2674 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2675 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2676
2677 /*
2678 * Commit it to the VMCS.
2679 */
2680 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2681 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2682 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2684 return VINF_SUCCESS;
2685}
2686
2687
2688/**
2689 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2690 * area in the VMCS.
2691 *
2692 * @returns VBox status code.
2693 * @param pVCpu The cross context virtual CPU structure.
2694 * @param pVmxTransient The VMX-transient structure.
2695 *
2696 * @remarks Will import guest CR0 on strict builds during validation of
2697 * segments.
2698 * @remarks No-long-jump zone!!!
2699 */
2700static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2701{
2702 int rc = VERR_INTERNAL_ERROR_5;
2703#ifndef IN_NEM_DARWIN
2704 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2705#endif
2706 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2707 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2708#ifndef IN_NEM_DARWIN
2709 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2710#endif
2711
2712 /*
2713 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2714 */
2715 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2716 {
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2718 {
2719 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2720#ifndef IN_NEM_DARWIN
2721 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2722 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2723#endif
2724 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2725 AssertRC(rc);
2726 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2727 }
2728
2729 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2730 {
2731 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2732#ifndef IN_NEM_DARWIN
2733 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2734 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2735#endif
2736 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2737 AssertRC(rc);
2738 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2739 }
2740
2741 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2742 {
2743 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2744#ifndef IN_NEM_DARWIN
2745 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2746 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2747#endif
2748 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2749 AssertRC(rc);
2750 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2751 }
2752
2753 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2754 {
2755 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2756#ifndef IN_NEM_DARWIN
2757 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2758 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2759#endif
2760 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2761 AssertRC(rc);
2762 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2763 }
2764
2765 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2766 {
2767 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2768#ifndef IN_NEM_DARWIN
2769 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2770 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2771#endif
2772 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2773 AssertRC(rc);
2774 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2775 }
2776
2777 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2778 {
2779 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2780#ifndef IN_NEM_DARWIN
2781 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2782 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2783#endif
2784 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2785 AssertRC(rc);
2786 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2787 }
2788
2789#ifdef VBOX_STRICT
2790 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2791#endif
2792 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2793 pCtx->cs.Attr.u));
2794 }
2795
2796 /*
2797 * Guest TR.
2798 */
2799 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2800 {
2801 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2802
2803 /*
2804 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2805 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2806 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2807 */
2808 uint16_t u16Sel;
2809 uint32_t u32Limit;
2810 uint64_t u64Base;
2811 uint32_t u32AccessRights;
2812#ifndef IN_NEM_DARWIN
2813 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2814#endif
2815 {
2816 u16Sel = pCtx->tr.Sel;
2817 u32Limit = pCtx->tr.u32Limit;
2818 u64Base = pCtx->tr.u64Base;
2819 u32AccessRights = pCtx->tr.Attr.u;
2820 }
2821#ifndef IN_NEM_DARWIN
2822 else
2823 {
2824 Assert(!pVmxTransient->fIsNestedGuest);
2825 Assert(pVM->hm.s.vmx.pRealModeTSS);
2826 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2827
2828 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2829 RTGCPHYS GCPhys;
2830 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2831 AssertRCReturn(rc, rc);
2832
2833 X86DESCATTR DescAttr;
2834 DescAttr.u = 0;
2835 DescAttr.n.u1Present = 1;
2836 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2837
2838 u16Sel = 0;
2839 u32Limit = HM_VTX_TSS_SIZE;
2840 u64Base = GCPhys;
2841 u32AccessRights = DescAttr.u;
2842 }
2843#endif
2844
2845 /* Validate. */
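/* The selector's TI bit (bit 2) must be clear: TR must reference the GDT, not the LDT. */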
2846 Assert(!(u16Sel & RT_BIT(2)));
2847 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2848 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2849 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2850 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2851 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2852 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2853 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2854 Assert( (u32Limit & 0xfff) == 0xfff
2855 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2856 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2857 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2858
2859 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2860 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2861 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2863
2864 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2865 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2866 }
2867
2868 /*
2869 * Guest GDTR.
2870 */
2871 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2872 {
2873 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2874
2875 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2876 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2877
2878 /* Validate. */
2879 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2880
2881 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2882 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2883 }
2884
2885 /*
2886 * Guest LDTR.
2887 */
2888 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2889 {
2890 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2891
2892 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2893 uint32_t u32Access;
2894 if ( !pVmxTransient->fIsNestedGuest
2895 && !pCtx->ldtr.Attr.u)
2896 u32Access = X86DESCATTR_UNUSABLE;
2897 else
2898 u32Access = pCtx->ldtr.Attr.u;
2899
2900 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2901 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2902 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2904
2905 /* Validate. */
2906 if (!(u32Access & X86DESCATTR_UNUSABLE))
2907 {
2908 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2909 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2910 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2912 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2913 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2914 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2915 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2916 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2917 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2918 }
2919
2920 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2921 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2922 }
2923
2924 /*
2925 * Guest IDTR.
2926 */
2927 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2928 {
2929 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2930
2931 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2932 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2933
2934 /* Validate. */
2935 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2936
2937 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2938 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2939 }
2940
2941 return VINF_SUCCESS;
2942}
2943
2944
2945/**
2946 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2947 * VM-exit interruption info type.
2948 *
2949 * @returns The IEM exception flags.
2950 * @param uVector The event vector.
2951 * @param uVmxEventType The VMX event type.
2952 *
2953 * @remarks This function currently only constructs flags required for
2954 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2955 * and CR2 aspects of an exception are not included).
2956 */
2957static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2958{
2959 uint32_t fIemXcptFlags;
2960 switch (uVmxEventType)
2961 {
2962 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2963 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2964 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2965 break;
2966
2967 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2968 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2969 break;
2970
2971 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2972 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2973 break;
2974
2975 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2976 {
2977 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2978 if (uVector == X86_XCPT_BP)
2979 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2980 else if (uVector == X86_XCPT_OF)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2982 else
2983 {
2984 fIemXcptFlags = 0;
2985 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2986 }
2987 break;
2988 }
2989
2990 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2991 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2992 break;
2993
2994 default:
2995 fIemXcptFlags = 0;
2996 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2997 break;
2998 }
2999 return fIemXcptFlags;
3000}
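/* For illustration, reading the switch above: an INT3-delivered breakpoint (type SW_XCPT,
   vector X86_XCPT_BP) maps to IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR, while an
   NMI or hardware exception maps to just IEM_XCPT_FLAGS_T_CPU_XCPT. */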
3001
3002
3003/**
3004 * Sets an event as a pending event to be injected into the guest.
3005 *
3006 * @param pVCpu The cross context virtual CPU structure.
3007 * @param u32IntInfo The VM-entry interruption-information field.
3008 * @param cbInstr The VM-entry instruction length in bytes (for
3009 * software interrupts, exceptions and privileged
3010 * software exceptions).
3011 * @param u32ErrCode The VM-entry exception error code.
3012 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3013 * page-fault.
3014 */
3015DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3016 RTGCUINTPTR GCPtrFaultAddress)
3017{
3018 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3019 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3020 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3021 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3022 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3023 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3024}
3025
3026
3027/**
3028 * Sets an external interrupt as pending-for-injection into the VM.
3029 *
3030 * @param pVCpu The cross context virtual CPU structure.
3031 * @param u8Interrupt The external interrupt vector.
3032 */
3033DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3034{
3035 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3036 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3037 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3039 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3040 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3041}
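/* Worked example of the encoding used above (vector in bits 7:0, type in bits 10:8,
   error-code-valid in bit 11, valid in bit 31 of the VM-entry interruption-information
   field): an external interrupt with vector 0x20 is encoded as u32IntInfo = 0x80000020,
   since the external-interrupt type is 0 and no error code is pushed. */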
3042
3043
3044/**
3045 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3046 *
3047 * @param pVCpu The cross context virtual CPU structure.
3048 */
3049DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3050{
3051 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3052 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3055 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3056 Log4Func(("NMI pending injection\n"));
3057}
3058
3059
3060/**
3061 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3106/**
3107 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the general-protection exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120
3121
3122/**
3123 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the stack exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3137
3138
3139/**
3140 * Fixes up attributes for the specified segment register.
3141 *
3142 * @param pVCpu The cross context virtual CPU structure.
3143 * @param pSelReg The segment register that needs fixing.
3144 * @param pszRegName The register name (for logging and assertions).
3145 */
3146static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3147{
3148 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3149
3150 /*
3151 * If VT-x marks the segment as unusable, most other bits remain undefined:
3152 * - For CS the L, D and G bits have meaning.
3153 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3154 * - For the remaining data segments no bits are defined.
3155 *
3156     * The present bit and the unusable bit have been observed to be set at the
3157 * same time (the selector was supposed to be invalid as we started executing
3158 * a V8086 interrupt in ring-0).
3159 *
3160     * What is important for the rest of the VBox code is that the P bit is
3161     * cleared.  Some of the other VBox code recognizes the unusable bit, but
3162     * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
3163 * safe side here, we'll strip off P and other bits we don't care about. If
3164 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3165 *
3166 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3167 */
3168#ifdef VBOX_STRICT
3169 uint32_t const uAttr = pSelReg->Attr.u;
3170#endif
3171
3172 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3173 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3174 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3175
3176#ifdef VBOX_STRICT
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Disable(pVCpu);
3179# endif
3180 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3181# ifdef DEBUG_bird
3182 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3183 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3184 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3185# endif
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Enable(pVCpu);
3188# endif
3189 NOREF(uAttr);
3190#endif
3191 RT_NOREF2(pVCpu, pszRegName);
3192}
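/* Minimal sketch of the effect (hypothetical input): an unusable segment read back with
   Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_G comes out of the function
   as Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G, i.e. the present bit is stripped while
   the bits named in the mask above survive. */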
3193
3194
3195/**
3196 * Imports a guest segment register from the current VMCS into the guest-CPU
3197 * context.
3198 *
3199 * @param pVCpu The cross context virtual CPU structure.
3200 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3201 *
3202 * @remarks Called with interrupts and/or preemption disabled.
3203 */
3204template<uint32_t const a_iSegReg>
3205DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3206{
3207 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3208    /* Check that the macros we depend upon here and in the exporting parent function work: */
3209#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3210 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3211 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3212 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3216 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3217 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3218 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3219 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
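    /* Note: the string literal below packs the six register names as 3-byte entries
       ("ES\0", "CS\0", ...), so 'a_iSegReg * 3' indexes the matching name for logging. */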
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
3239
3240
3241/**
3242 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3299 *
3300 * @returns The RIP value.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 *
3303 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3304 * @remarks Do -not- call this function directly!
3305 */
3306DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3310 AssertRC(rc);
3311
3312 pVCpu->cpum.GstCtx.rip = u64Val;
3313
3314 return u64Val;
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3332 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3333 }
3334}
3335
3336
3337/**
3338 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3339 *
3340 * @param pVCpu The cross context virtual CPU structure.
3341 * @param pVmcsInfo The VMCS info. object.
3342 *
3343 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3344 * @remarks Do -not- call this function directly!
3345 */
3346DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3347{
3348 uint64_t fRFlags;
3349 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3350 AssertRC(rc);
3351
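    /* Sanity: bit 1 of RFLAGS is reserved and always reads as 1 (X86_EFL_RA1_MASK), and
       nothing outside the architecturally live flags should be set. */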
3352 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3353 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3354
3355 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3356#ifndef IN_NEM_DARWIN
3357 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3358 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3359    { /* most likely */ }
3360 else
3361 {
3362 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3363 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368}
3369
3370
3371/**
3372 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcsInfo The VMCS info. object.
3376 *
3377 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3378 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3379 * instead!!!
3380 */
3381DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3382{
3383 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3384 {
3385 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3386 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3387 }
3388}
3389
3390
3391#ifndef IN_NEM_DARWIN
3392/**
3393 * Imports the guest TSC_AUX and certain other MSRs from the VMCS into the guest-CPU
3394 * context.
3395 *
3396 * The other MSRs are in the VM-exit MSR-store.
3397 *
3398 * @returns VBox status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param pVmcsInfo The VMCS info. object.
3401 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3402 * unexpected errors). Ignored in NEM/darwin context.
3403 */
3404DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3405{
3406 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3407 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3408 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3409 Assert(pMsrs);
3410 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3411 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3412 for (uint32_t i = 0; i < cMsrs; i++)
3413 {
3414 uint32_t const idMsr = pMsrs[i].u32Msr;
3415 switch (idMsr)
3416 {
3417 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3418 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3419 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3420 default:
3421 {
3422 uint32_t idxLbrMsr;
3423 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3424 if (VM_IS_VMX_LBR(pVM))
3425 {
3426 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3427 {
3428 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3429 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3430 break;
3431 }
3432 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3433 {
3434                         Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3435 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3436 break;
3437 }
3438 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3439 {
3440 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3441 break;
3442 }
3443 /* Fallthru (no break) */
3444 }
3445 pVCpu->cpum.GstCtx.fExtrn = 0;
3446             VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3447 ASMSetFlags(fEFlags);
3448 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3449 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3450 }
3451 }
3452 }
3453 return VINF_SUCCESS;
3454}
3455#endif /* !IN_NEM_DARWIN */
3456
3457
3458/**
3459 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3460 *
3461 * @param pVCpu The cross context virtual CPU structure.
3462 * @param pVmcsInfo The VMCS info. object.
3463 */
3464DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3465{
3466 uint64_t u64Cr0;
3467 uint64_t u64Shadow;
3468 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3469 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3470#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3471 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3472 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3473#else
3474 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3475 {
3476 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3477 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3478 }
3479 else
3480 {
3481 /*
3482 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3483 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3484 * re-construct CR0. See @bugref{9180#c95} for details.
3485 */
3486 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3487 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3488 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3489 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3490 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3491 Assert(u64Cr0 & X86_CR0_NE);
3492 }
3493#endif
3494
3495#ifndef IN_NEM_DARWIN
3496 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3497#endif
3498 CPUMSetGuestCR0(pVCpu, u64Cr0);
3499#ifndef IN_NEM_DARWIN
3500 VMMRZCallRing3Enable(pVCpu);
3501#endif
3502}
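/* Reading the merge above (non-nested case): bits the host owns (set in u64Cr0Mask) are
   taken from the CR0 read shadow, i.e. the value the guest believes it wrote, while the
   remaining bits come from the raw VMCS guest CR0.  The nested-guest case applies the same
   idea using the combined guest and nested-guest masks, as described in the comment above. */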
3503
3504
3505/**
3506 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3507 *
3508 * @param pVCpu The cross context virtual CPU structure.
3509 */
3510DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3511{
3512 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3513 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3514
3515 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3516 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3517 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3518 && CPUMIsGuestPagingEnabledEx(pCtx)))
3519 {
3520 uint64_t u64Cr3;
3521 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3522 if (pCtx->cr3 != u64Cr3)
3523 {
3524 pCtx->cr3 = u64Cr3;
3525 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3526 }
3527
3528 /*
3529 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3530 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3531 */
3532 if (CPUMIsGuestInPAEModeEx(pCtx))
3533 {
3534 X86PDPE aPaePdpes[4];
3535 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3536 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3537 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3539 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3540 {
3541 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3542 /* PGM now updates PAE PDPTEs while updating CR3. */
3543 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3544 }
3545 }
3546 }
3547}
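/* Orientation note for the PAE path above: when EPT keeps the PDPTEs in the VMCS, they are
   read back here after a VM-exit and, if changed, copied into the guest context; PGM then
   refreshes its own copies when it services the VMCPU_FF_HM_UPDATE_CR3 force-flag. */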
3548
3549
3550/**
3551 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3552 *
3553 * @param pVCpu The cross context virtual CPU structure.
3554 * @param pVmcsInfo The VMCS info. object.
3555 */
3556DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3557{
3558 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3559 uint64_t u64Cr4;
3560 uint64_t u64Shadow;
3561 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3562 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3563#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3564 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3565 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3566#else
3567 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3568 {
3569 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3570 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3571 }
3572 else
3573 {
3574 /*
3575 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3576 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3577 * re-construct CR4. See @bugref{9180#c95} for details.
3578 */
3579 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3580 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3581 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3582 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3583 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3584 Assert(u64Cr4 & X86_CR4_VMXE);
3585 }
3586#endif
3587 pCtx->cr4 = u64Cr4;
3588}
3589
3590
3591/**
3592 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3593 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3594 */
3595DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3596{
3597 /*
3598 * We must import RIP here to set our EM interrupt-inhibited state.
3599 * We also import RFLAGS as our code that evaluates pending interrupts
3600 * before VM-entry requires it.
3601 */
3602 vmxHCImportGuestRip(pVCpu);
3603 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3604
3605 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3606 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3607 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3608 pVCpu->cpum.GstCtx.rip);
3609 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3610}
3611
3612
3613/**
3614 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3615 * context.
3616 *
3617 * @note May import RIP and RFLAGS if interrupts or NMIs are blocked.
3618 *
3619 * @param pVCpu The cross context virtual CPU structure.
3620 * @param pVmcsInfo The VMCS info. object.
3621 *
3622 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3623 * do not log!
3624 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3625 * instead!!!
3626 */
3627DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3628{
3629 uint32_t u32Val;
3630 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3631 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3632 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3633 if (!u32Val)
3634 {
3635 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3636 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3637 }
3638 else
3639 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3640}
3641
3642
3643/**
3644 * Worker for VMXR0ImportStateOnDemand.
3645 *
3646 * @returns VBox status code.
3647 * @param pVCpu The cross context virtual CPU structure.
3648 * @param pVmcsInfo The VMCS info. object.
3649 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3650 */
3651static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3652{
3653 int rc = VINF_SUCCESS;
3654 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3655 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3656 uint32_t u32Val;
3657
3658 /*
3659     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3660     *       on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3661     *       neither are other host platforms.
3662     *
3663     *       Committing this temporarily as it prevents the BSOD.
3664 *
3665 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3666 */
3667#ifdef RT_OS_WINDOWS
3668 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3669 return VERR_HM_IPE_1;
3670#endif
3671
3672 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3673
3674#ifndef IN_NEM_DARWIN
3675 /*
3676 * We disable interrupts to make the updating of the state and in particular
3677 * the fExtrn modification atomic wrt to preemption hooks.
3678 */
3679 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3680#endif
3681
3682 fWhat &= pCtx->fExtrn;
3683 if (fWhat)
3684 {
3685 do
3686 {
3687 if (fWhat & CPUMCTX_EXTRN_RIP)
3688 vmxHCImportGuestRip(pVCpu);
3689
3690 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3691 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3692
3693 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3694 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3695 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3696
3697 if (fWhat & CPUMCTX_EXTRN_RSP)
3698 {
3699 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3700 AssertRC(rc);
3701 }
3702
3703 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3704 {
3705 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3706#ifndef IN_NEM_DARWIN
3707 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3708#else
3709 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3710#endif
3711 if (fWhat & CPUMCTX_EXTRN_CS)
3712 {
3713 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3714 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3715 if (fRealOnV86Active)
3716 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3717 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3718 }
3719 if (fWhat & CPUMCTX_EXTRN_SS)
3720 {
3721 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3722 if (fRealOnV86Active)
3723 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3724 }
3725 if (fWhat & CPUMCTX_EXTRN_DS)
3726 {
3727 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3728 if (fRealOnV86Active)
3729 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3730 }
3731 if (fWhat & CPUMCTX_EXTRN_ES)
3732 {
3733 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3734 if (fRealOnV86Active)
3735 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3736 }
3737 if (fWhat & CPUMCTX_EXTRN_FS)
3738 {
3739 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3740 if (fRealOnV86Active)
3741 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3742 }
3743 if (fWhat & CPUMCTX_EXTRN_GS)
3744 {
3745 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3746 if (fRealOnV86Active)
3747 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3748 }
3749 }
3750
3751 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3752 {
3753 if (fWhat & CPUMCTX_EXTRN_LDTR)
3754 vmxHCImportGuestLdtr(pVCpu);
3755
3756 if (fWhat & CPUMCTX_EXTRN_GDTR)
3757 {
3758 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3759 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3760 pCtx->gdtr.cbGdt = u32Val;
3761 }
3762
3763 /* Guest IDTR. */
3764 if (fWhat & CPUMCTX_EXTRN_IDTR)
3765 {
3766 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3767 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3768 pCtx->idtr.cbIdt = u32Val;
3769 }
3770
3771 /* Guest TR. */
3772 if (fWhat & CPUMCTX_EXTRN_TR)
3773 {
3774#ifndef IN_NEM_DARWIN
3775 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3776 don't need to import that one. */
3777 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3778#endif
3779 vmxHCImportGuestTr(pVCpu);
3780 }
3781 }
3782
3783 if (fWhat & CPUMCTX_EXTRN_DR7)
3784 {
3785#ifndef IN_NEM_DARWIN
3786 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3787#endif
3788 {
3789 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3790 AssertRC(rc);
3791 }
3792 }
3793
3794 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3795 {
3796 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3797 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3798 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3799 pCtx->SysEnter.cs = u32Val;
3800 }
3801
3802#ifndef IN_NEM_DARWIN
3803 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3804 {
3805 if ( pVM->hmr0.s.fAllow64BitGuests
3806 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3807 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3808 }
3809
3810 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3811 {
3812 if ( pVM->hmr0.s.fAllow64BitGuests
3813 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3814 {
3815 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3816 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3817 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3818 }
3819 }
3820
3821 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3822 {
3823 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3824 AssertRCReturn(rc, rc);
3825 }
3826#else
3827 NOREF(pVM);
3828#endif
3829
3830 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3831 {
3832 if (fWhat & CPUMCTX_EXTRN_CR0)
3833 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3834
3835 if (fWhat & CPUMCTX_EXTRN_CR4)
3836 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3837
3838 if (fWhat & CPUMCTX_EXTRN_CR3)
3839 vmxHCImportGuestCr3(pVCpu);
3840 }
3841
3842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3843 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3844 {
3845 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3846 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3847 {
3848 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3849 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3850 if (RT_SUCCESS(rc))
3851 { /* likely */ }
3852 else
3853 break;
3854 }
3855 }
3856#endif
3857 } while (0);
3858
3859 if (RT_SUCCESS(rc))
3860 {
3861 /* Update fExtrn. */
3862 pCtx->fExtrn &= ~fWhat;
3863
3864 /* If everything has been imported, clear the HM keeper bit. */
3865 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3866 {
3867#ifndef IN_NEM_DARWIN
3868 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3869#else
3870 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3871#endif
3872 Assert(!pCtx->fExtrn);
3873 }
3874 }
3875 }
3876#ifndef IN_NEM_DARWIN
3877 else
3878 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3879
3880 /*
3881 * Restore interrupts.
3882 */
3883 ASMSetFlags(fEFlags);
3884#endif
3885
3886 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3887
3888 if (RT_SUCCESS(rc))
3889 { /* likely */ }
3890 else
3891 return rc;
3892
3893 /*
3894 * Honor any pending CR3 updates.
3895 *
3896 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3897 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3898 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3899 *
3900 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3901 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3902 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3903 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3904 *
3905 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3906 *
3907 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3908 */
3909 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3910#ifndef IN_NEM_DARWIN
3911 && VMMRZCallRing3IsEnabled(pVCpu)
3912#endif
3913 )
3914 {
3915 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3916 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3917 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3918 }
3919
3920 return VINF_SUCCESS;
3921}
3922
3923
3924/**
3925 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3926 *
3927 * @returns VBox status code.
3928 * @param pVCpu The cross context virtual CPU structure.
3929 * @param pVmcsInfo The VMCS info. object.
3930 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3931 * in NEM/darwin context.
3932 * @tparam a_fWhat What to import, zero or more bits from
3933 * HMVMX_CPUMCTX_EXTRN_ALL.
3934 */
3935template<uint64_t const a_fWhat>
3936static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3937{
3938 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3939 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3940 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3941 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3942
3943    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3944
3945 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3946
3947    /* RIP and RFLAGS may have been imported already by the post-exit code
3948       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3949       this part of the code skips re-fetching them. */
3950 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3951 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3952 {
3953 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3954 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3955
3956 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3957 {
3958 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3959 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3960 else
3961 vmxHCImportGuestCoreRip(pVCpu);
3962 }
3963 }
3964
3965 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3966 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3967 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3968
3969 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3970 {
3971 if (a_fWhat & CPUMCTX_EXTRN_CS)
3972 {
3973 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3974            /** @todo try to get rid of this carp, it smells and is probably never ever
3975 * used: */
3976 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3977 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3978 {
3979 vmxHCImportGuestCoreRip(pVCpu);
3980 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3981 }
3982 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3983 }
3984 if (a_fWhat & CPUMCTX_EXTRN_SS)
3985 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3986 if (a_fWhat & CPUMCTX_EXTRN_DS)
3987 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3988 if (a_fWhat & CPUMCTX_EXTRN_ES)
3989 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3990 if (a_fWhat & CPUMCTX_EXTRN_FS)
3991 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3992 if (a_fWhat & CPUMCTX_EXTRN_GS)
3993 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3994
3995 /* Guest TR.
3996 Real-mode emulation using virtual-8086 mode has the fake TSS
3997 (pRealModeTSS) in TR, don't need to import that one. */
3998#ifndef IN_NEM_DARWIN
3999 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4000 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4001 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4002#else
4003 if (a_fWhat & CPUMCTX_EXTRN_TR)
4004#endif
4005 vmxHCImportGuestTr(pVCpu);
4006
4007#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4008 if (fRealOnV86Active)
4009 {
4010 if (a_fWhat & CPUMCTX_EXTRN_CS)
4011 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4012 if (a_fWhat & CPUMCTX_EXTRN_SS)
4013 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4014 if (a_fWhat & CPUMCTX_EXTRN_DS)
4015 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4016 if (a_fWhat & CPUMCTX_EXTRN_ES)
4017 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4018 if (a_fWhat & CPUMCTX_EXTRN_FS)
4019 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4020 if (a_fWhat & CPUMCTX_EXTRN_GS)
4021 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4022 }
4023#endif
4024 }
4025
4026 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4027 {
4028 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4029 AssertRC(rc);
4030 }
4031
4032 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4033 vmxHCImportGuestLdtr(pVCpu);
4034
4035 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4036 {
4037 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4038 uint32_t u32Val;
4039 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4040 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4041 }
4042
4043 /* Guest IDTR. */
4044 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4045 {
4046 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4047 uint32_t u32Val;
4048 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4049 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
4050 }
4051
4052 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4053 {
4054#ifndef IN_NEM_DARWIN
4055 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4056#endif
4057 {
4058 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4059 AssertRC(rc);
4060 }
4061 }
4062
4063 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4064 {
4065 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4066 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4067 uint32_t u32Val;
4068 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4069 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4070 }
4071
4072#ifndef IN_NEM_DARWIN
4073 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4074 {
4075 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4076 && pVM->hmr0.s.fAllow64BitGuests)
4077 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4078 }
4079
4080 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4081 {
4082 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4083 && pVM->hmr0.s.fAllow64BitGuests)
4084 {
4085 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4086 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4087 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4088 }
4089 }
4090
4091 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4092 {
4093 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4094 AssertRCReturn(rc1, rc1);
4095 }
4096#else
4097 NOREF(pVM);
4098#endif
4099
4100 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4101 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4102
4103 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4104 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4105
4106 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4107 vmxHCImportGuestCr3(pVCpu);
4108
4109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4110 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4111 {
4112 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4113 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4114 {
4115 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4116 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4117 AssertRCReturn(rc, rc);
4118 }
4119 }
4120#endif
4121
4122 /* Update fExtrn. */
4123 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4124
4125 /* If everything has been imported, clear the HM keeper bit. */
4126 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4127 {
4128#ifndef IN_NEM_DARWIN
4129 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4130#else
4131 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4132#endif
4133 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4134 }
4135
4136 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4137
4138 /*
4139 * Honor any pending CR3 updates.
4140 *
4141 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4142 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4143 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4144 *
4145 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4146 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4147 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4148 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4149 *
4150 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4151 *
4152 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4153 */
4154#ifndef IN_NEM_DARWIN
4155 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4156 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4157 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4158 return VINF_SUCCESS;
4159 ASMSetFlags(fEFlags);
4160#else
4161 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4162 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4163 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4164 return VINF_SUCCESS;
4165 RT_NOREF_PV(fEFlags);
4166#endif
4167
4168 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4169 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4170 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4171 return VINF_SUCCESS;
4172}
4173
4174
4175/**
4176 * Internal state fetcher.
4177 *
4178 * @returns VBox status code.
4179 * @param pVCpu The cross context virtual CPU structure.
4180 * @param pVmcsInfo The VMCS info. object.
4181 * @param pszCaller For logging.
4182 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4183 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4184 * already. This is ORed together with @a a_fWhat when
4185 * calculating what needs fetching (just for safety).
4186 * @tparam a_fDonePostExit  What's ASSUMED to have been retrieved by
4187 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4188 * already. This is ORed together with @a a_fWhat when
4189 * calculating what needs fetching (just for safety).
4190 */
4191template<uint64_t const a_fWhat,
4192 uint64_t const a_fDoneLocal = 0,
4193 uint64_t const a_fDonePostExit = 0
4194#ifndef IN_NEM_DARWIN
4195 | CPUMCTX_EXTRN_INHIBIT_INT
4196 | CPUMCTX_EXTRN_INHIBIT_NMI
4197# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4198 | HMVMX_CPUMCTX_EXTRN_ALL
4199# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4200 | CPUMCTX_EXTRN_RFLAGS
4201# endif
4202#else /* IN_NEM_DARWIN */
4203 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4204#endif /* IN_NEM_DARWIN */
4205>
4206DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4207{
4208 RT_NOREF_PV(pszCaller);
4209 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4210 {
4211#ifndef IN_NEM_DARWIN
4212 /*
4213 * We disable interrupts to make the updating of the state and in particular
4214 * the fExtrn modification atomic wrt to preemption hooks.
4215 */
4216 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4217#else
4218 RTCCUINTREG const fEFlags = 0;
4219#endif
4220
4221 /*
4222 * We combine all three parameters and take the (probably) inlined optimized
4223 * code path for the new things specified in a_fWhat.
4224 *
4225 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4226 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4227 * also take the streamlined path when both of these are cleared in fExtrn
4228 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4229 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4230 */
4231 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4232 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4233 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4234 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4235 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4236 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4237 {
4238 int const rc = vmxHCImportGuestStateInner< a_fWhat
4239 & HMVMX_CPUMCTX_EXTRN_ALL
4240 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4241#ifndef IN_NEM_DARWIN
4242 ASMSetFlags(fEFlags);
4243#endif
4244 return rc;
4245 }
4246
4247#ifndef IN_NEM_DARWIN
4248 ASMSetFlags(fEFlags);
4249#endif
4250
4251 /*
4252 * We shouldn't normally get here, but it may happen when executing
4253 * in the debug run-loops. Typically, everything should already have
4254 * been fetched then. Otherwise call the fallback state import function.
4255 */
4256 if (fWhatToDo == 0)
4257 { /* hope the cause was the debug loop or something similar */ }
4258 else
4259 {
4260 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4261 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4262 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4263 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4264 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4265 }
4266 }
4267 return VINF_SUCCESS;
4268}
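/* Hypothetical usage sketch (exit handlers in this file call it along these lines):
 *     int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
 * The caller names only what it needs; anything already fetched locally or by the post-exit
 * code is passed via a_fDoneLocal / a_fDonePostExit so it is not re-read from the VMCS. */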
4269
4270
4271/**
4272 * Check per-VM and per-VCPU force flag actions that require us to go back to
4273 * ring-3 for one reason or another.
4274 *
4275 * @returns Strict VBox status code (i.e. informational status codes too)
4276 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4277 * ring-3.
4278 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4279 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4280 * interrupts)
4281 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4282 * all EMTs to be in ring-3.
4283 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4284 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4285 * to the EM loop.
4286 *
4287 * @param pVCpu The cross context virtual CPU structure.
4288 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4289 * @param fStepping Whether we are single-stepping the guest using the
4290 * hypervisor debugger.
4291 *
4292 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4293 * is no longer in VMX non-root mode.
4294 */
4295static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4296{
4297#ifndef IN_NEM_DARWIN
4298 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4299#endif
4300
4301 /*
4302 * Update pending interrupts into the APIC's IRR.
4303 */
4304 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4305 APICUpdatePendingInterrupts(pVCpu);
4306
4307 /*
4308 * Anything pending? Should be more likely than not if we're doing a good job.
4309 */
4310 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4311 if ( !fStepping
4312 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4313 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4314 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4315 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4316 return VINF_SUCCESS;
4317
4318    /* Pending PGM CR3 sync. */
4319    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4320 {
4321 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4322 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4323 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4324 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4325 if (rcStrict != VINF_SUCCESS)
4326 {
4327 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4328 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4329 return rcStrict;
4330 }
4331 }
4332
4333 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4334 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4335 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4336 {
4337 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4338 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4339 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4340 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4341 return rc;
4342 }
4343
4344 /* Pending VM request packets, such as hardware interrupts. */
4345 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4346 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4347 {
4348 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4349 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4350 return VINF_EM_PENDING_REQUEST;
4351 }
4352
4353 /* Pending PGM pool flushes. */
4354 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4355 {
4356 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4357 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4358 return VINF_PGM_POOL_FLUSH_PENDING;
4359 }
4360
4361 /* Pending DMA requests. */
4362 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4363 {
4364 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4365 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4366 return VINF_EM_RAW_TO_R3;
4367 }
4368
4369#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4370 /*
4371 * Pending nested-guest events.
4372 *
4373     * Please note that the priority of these events is specified and important.
4374 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4375 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4376 *
4377 * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
4378 * handled here. They'll be handled by the hardware while executing the nested-guest
4379     * or by us when we inject events that are not part of the VM-entry of the nested-guest.
4380 */
4381 if (fIsNestedGuest)
4382 {
4383 /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
4384 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4385 {
4386 Log4Func(("Pending nested-guest APIC-write\n"));
4387 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4388 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4389 if ( rcStrict == VINF_SUCCESS
4390 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4391 return rcStrict;
4392 }
4393
4394 /* Pending nested-guest monitor-trap flag (MTF). */
4395 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4396 {
4397 Log4Func(("Pending nested-guest MTF\n"));
4398 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4399 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4400 return rcStrict;
4401 }
4402
4403 /* Pending nested-guest VMX-preemption timer expired. */
4404 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4405 {
4406 Log4Func(("Pending nested-guest preempt timer\n"));
4407 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4408 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4409 return rcStrict;
4410 }
4411 }
4412#else
4413 NOREF(fIsNestedGuest);
4414#endif
4415
4416 return VINF_SUCCESS;
4417}
4418
4419
4420/**
4421 * Converts any TRPM trap into a pending HM event. This is typically used when
4422 * entering from ring-3 (not longjmp returns).
4423 *
4424 * @param pVCpu The cross context virtual CPU structure.
4425 */
4426static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4427{
4428 Assert(TRPMHasTrap(pVCpu));
4429 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4430
4431 uint8_t uVector;
4432 TRPMEVENT enmTrpmEvent;
4433 uint32_t uErrCode;
4434 RTGCUINTPTR GCPtrFaultAddress;
4435 uint8_t cbInstr;
4436 bool fIcebp;
4437
4438 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4439 AssertRC(rc);
4440
4441 uint32_t u32IntInfo;
4442 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4443 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4444
4445 rc = TRPMResetTrap(pVCpu);
4446 AssertRC(rc);
4447 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4448 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4449
4450 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4451}
4452
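
/*
 * Illustrative sketch (disabled, not part of the original code): how the interruption-
 * information dword built above is laid out. The helper name is made up; the real code
 * uses HMTrpmEventTypeToVmxEventType() and the VMX_IDT_VECTORING_INFO_* /
 * VMX_BF_ENTRY_INT_INFO_* macros.
 */
#if 0
/** Packs an interruption-information dword: vector in bits 7:0, event type in bits 10:8,
 *  error-code-valid in bit 11 and the valid bit in bit 31. */
static uint32_t exampleMakeIntInfo(uint8_t uVector, uint8_t uType, bool fErrCodeValid)
{
    return (uint32_t)uVector
         | ((uint32_t)(uType & 0x7) << 8)
         | ((uint32_t)(fErrCodeValid ? 1 : 0) << 11)
         | RT_BIT_32(31);
}
#endif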
4453
4454/**
4455 * Converts the pending HM event into a TRPM trap.
4456 *
4457 * @param pVCpu The cross context virtual CPU structure.
4458 */
4459static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4460{
4461 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4462
4463 /* If a trap was already pending, we did something wrong! */
4464 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4465
4466 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4467 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4468 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4469
4470 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4471
4472 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4473 AssertRC(rc);
4474
4475 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4476 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4477
4478 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4479 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4480 else
4481 {
4482 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4483 switch (uVectorType)
4484 {
4485 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4486 TRPMSetTrapDueToIcebp(pVCpu);
4487 RT_FALL_THRU();
4488 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4489 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4490 {
4491 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4492 || ( uVector == X86_XCPT_BP /* INT3 */
4493 || uVector == X86_XCPT_OF /* INTO */
4494 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4495 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4496 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4497 break;
4498 }
4499 }
4500 }
4501
4502 /* We're now done converting the pending event. */
4503 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4504}
4505
4506
4507/**
4508 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4509 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4510 *
4511 * @param pVCpu The cross context virtual CPU structure.
4512 * @param pVmcsInfo The VMCS info. object.
4513 */
4514static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4515{
4516 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4517 {
4518 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4519 {
4520 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4521 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4522 AssertRC(rc);
4523 }
4524 Log4Func(("Enabled interrupt-window exiting\n"));
4525 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4526}
4527
4528
4529/**
4530 * Clears the interrupt-window exiting control in the VMCS.
4531 *
4532 * @param pVCpu The cross context virtual CPU structure.
4533 * @param pVmcsInfo The VMCS info. object.
4534 */
4535DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4536{
4537 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4538 {
4539 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4540 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4541 AssertRC(rc);
4542 Log4Func(("Disabled interrupt-window exiting\n"));
4543 }
4544}
4545
4546
4547/**
4548 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4549 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4550 *
4551 * @param pVCpu The cross context virtual CPU structure.
4552 * @param pVmcsInfo The VMCS info. object.
4553 */
4554static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4555{
4556 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4557 {
4558 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4559 {
4560 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4561 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4562 AssertRC(rc);
4563 Log4Func(("Enabled NMI-window exiting\n"));
4564 }
4565 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4566}
4567
4568
4569/**
4570 * Clears the NMI-window exiting control in the VMCS.
4571 *
4572 * @param pVCpu The cross context virtual CPU structure.
4573 * @param pVmcsInfo The VMCS info. object.
4574 */
4575DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4576{
4577 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4578 {
4579 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4580 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4581 AssertRC(rc);
4582 Log4Func(("Disabled NMI-window exiting\n"));
4583 }
4584}
4585
4586
4587/**
4588 * Injects an event into the guest upon VM-entry by updating the relevant fields
4589 * in the VM-entry area in the VMCS.
4590 *
4591 * @returns Strict VBox status code (i.e. informational status codes too).
4592 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4593 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4594 *
4595 * @param pVCpu The cross context virtual CPU structure.
4596 * @param pVmcsInfo The VMCS info object.
4597 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4598 * @param pEvent The event being injected.
4599 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4600 * will be updated if necessary. This cannot be NULL.
4601 * @param fStepping Whether we're single-stepping guest execution and should
4602 * return VINF_EM_DBG_STEPPED if the event is injected
4603 * directly (registers modified by us, not by hardware on
4604 * VM-entry).
4605 */
4606static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4607 bool fStepping, uint32_t *pfIntrState)
4608{
4609 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4610 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4611 Assert(pfIntrState);
4612
4613#ifdef IN_NEM_DARWIN
4614 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4615#endif
4616
4617 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4618 uint32_t u32IntInfo = pEvent->u64IntInfo;
4619 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4620 uint32_t const cbInstr = pEvent->cbInstr;
4621 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4622 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4623 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4624
4625#ifdef VBOX_STRICT
4626 /*
4627 * Validate the error-code-valid bit for hardware exceptions.
4628 * No error codes for exceptions in real-mode.
4629 *
4630 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4631 */
4632 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4633 && !CPUMIsGuestInRealModeEx(pCtx))
4634 {
4635 switch (uVector)
4636 {
4637 case X86_XCPT_PF:
4638 case X86_XCPT_DF:
4639 case X86_XCPT_TS:
4640 case X86_XCPT_NP:
4641 case X86_XCPT_SS:
4642 case X86_XCPT_GP:
4643 case X86_XCPT_AC:
4644 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4645 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4646 RT_FALL_THRU();
4647 default:
4648 break;
4649 }
4650 }
4651
4652 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4653 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4654 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4655#endif
4656
4657 RT_NOREF(uVector);
4658 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4659 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4660 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4661 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4662 {
4663 Assert(uVector <= X86_XCPT_LAST);
4664 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4665 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4666 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4667 }
4668 else
4669 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4670
4671 /*
4672 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4673 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4674 * interrupt handler in the (real-mode) guest.
4675 *
4676 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4677 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4678 */
4679 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4680 {
4681#ifndef IN_NEM_DARWIN
4682 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4683#endif
4684 {
4685 /*
4686 * For CPUs with unrestricted guest execution enabled and with the guest
4687 * in real-mode, we must not set the deliver-error-code bit.
4688 *
4689 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4690 */
4691 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4692 }
4693#ifndef IN_NEM_DARWIN
4694 else
4695 {
4696 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4697 Assert(PDMVmmDevHeapIsEnabled(pVM));
4698 Assert(pVM->hm.s.vmx.pRealModeTSS);
4699 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4700
4701 /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
4702 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4703 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4704 AssertRCReturn(rc2, rc2);
4705
4706 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4707 size_t const cbIdtEntry = sizeof(X86IDTR16);
4708 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4709 {
4710 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4711 if (uVector == X86_XCPT_DF)
4712 return VINF_EM_RESET;
4713
4714 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4715 No error codes for exceptions in real-mode. */
4716 if (uVector == X86_XCPT_GP)
4717 {
4718 static HMEVENT const s_EventXcptDf
4719 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4720 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4721 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4722 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4723 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4724 }
4725
4726 /*
4727 * If we're injecting an event with no valid IDT entry, inject a #GP.
4728 * No error codes for exceptions in real-mode.
4729 *
4730 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4731 */
4732 static HMEVENT const s_EventXcptGp
4733 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4734 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4735 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4736 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4737 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4738 }
4739
4740 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4741 uint16_t uGuestIp = pCtx->ip;
4742 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4743 {
4744 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4745 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4746 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4747 }
4748 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4749 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4750
4751 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4752 X86IDTR16 IdtEntry;
4753 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4754 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4755 AssertRCReturn(rc2, rc2);
4756
4757 /* Construct the stack frame for the interrupt/exception handler. */
4758 VBOXSTRICTRC rcStrict;
4759 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4760 if (rcStrict == VINF_SUCCESS)
4761 {
4762 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4763 if (rcStrict == VINF_SUCCESS)
4764 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4765 }
4766
4767 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4768 if (rcStrict == VINF_SUCCESS)
4769 {
4770 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4771 pCtx->rip = IdtEntry.offSel;
4772 pCtx->cs.Sel = IdtEntry.uSel;
4773 pCtx->cs.ValidSel = IdtEntry.uSel;
4774 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry; /* Real-mode segment base = selector << 4; cbIdtEntry happens to equal 4 here. */
4775 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4776 && uVector == X86_XCPT_PF)
4777 pCtx->cr2 = GCPtrFault;
4778
4779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4780 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4781 | HM_CHANGED_GUEST_RSP);
4782
4783 /*
4784 * If we delivered a hardware exception (other than an NMI) and if there was
4785 * block-by-STI in effect, we should clear it.
4786 */
4787 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4788 {
4789 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4790 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4791 Log4Func(("Clearing inhibition due to STI\n"));
4792 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4793 }
4794
4795 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4796 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4797
4798 /*
4799 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4800 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4801 */
4802 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4803
4804 /*
4805 * If we eventually support nested-guest execution without unrestricted guest execution,
4806 * we should set fInterceptEvents here.
4807 */
4808 Assert(!fIsNestedGuest);
4809
4810 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4811 if (fStepping)
4812 rcStrict = VINF_EM_DBG_STEPPED;
4813 }
4814 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4815 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4816 return rcStrict;
4817 }
4818#else
4819 RT_NOREF(pVmcsInfo);
4820#endif
4821 }
4822
4823 /*
4824 * Validate.
4825 */
4826 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4827 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4828
4829 /*
4830 * Inject the event into the VMCS.
4831 */
4832 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4833 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4834 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4835 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4836 AssertRC(rc);
4837
4838 /*
4839 * Update guest CR2 if this is a page-fault.
4840 */
4841 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4842 pCtx->cr2 = GCPtrFault;
4843
4844 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4845 return VINF_SUCCESS;
4846}
4847
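
/*
 * Illustrative sketch (disabled, not part of the original code): a plain rendition of the
 * real-mode IVT lookup performed by vmxHCInjectEventVmcs() above. The type and function
 * names are made up; the real code reads an X86IDTR16 via PGMPhysSimpleReadGCPhys().
 */
#if 0
/** A real-mode IVT entry: 4 bytes, handler IP in the low word, handler CS in the high word. */
typedef struct EXAMPLEIVTENTRY
{
    uint16_t offHandler;
    uint16_t selHandler;
} EXAMPLEIVTENTRY;

/** Guest-physical address of the IVT entry for a vector: IDT base + vector * 4,
 *  matching the GCPhysIdtEntry computation above. */
static uint64_t exampleIvtEntryAddr(uint64_t uIdtBase, uint8_t uVector)
{
    return uIdtBase + (uint64_t)uVector * sizeof(EXAMPLEIVTENTRY);
}
#endif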
4848
4849/**
4850 * Evaluates the event to be delivered to the guest and sets it as the pending
4851 * event.
4852 *
4853 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4854 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4855 * NOT restore these force-flags.
4856 *
4857 * @returns Strict VBox status code (i.e. informational status codes too).
4858 * @param pVCpu The cross context virtual CPU structure.
4859 * @param pVmcsInfo The VMCS information structure.
4860 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4861 * state.
4862 */
4863static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4864{
4865 Assert(pfIntrState);
4866 Assert(!TRPMHasTrap(pVCpu));
4867
4868 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4869
4870 /*
4871 * Evaluate if a new event needs to be injected.
4872 * An event that's already pending has already performed all necessary checks.
4873 */
4874 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4875 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4876 {
4877 /** @todo SMI. SMIs take priority over NMIs. */
4878
4879 /*
4880 * NMIs.
4881 * NMIs take priority over external interrupts.
4882 */
4883 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4884 {
4885 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4886 {
4887 /* Finally, inject the NMI and we're done. */
4888 vmxHCSetPendingXcptNmi(pVCpu);
4889 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4890 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4891 return VINF_SUCCESS;
4892 }
4893 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4894 }
4895 else
4896 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4897
4898 /*
4899 * External interrupts (PIC/APIC).
4900 */
4901 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4902 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4903 {
4904 Assert(!DBGFIsStepping(pVCpu));
4905 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4906 AssertRC(rc);
4907
4908 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4909 {
4910 /*
4911 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4912 * We cannot re-request the interrupt from the controller again.
4913 */
4914 uint8_t u8Interrupt;
4915 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4916 if (RT_SUCCESS(rc))
4917 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4918 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4919 {
4920 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4921 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4922 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4923 /*
4924 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4925 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4926 * need to re-set this force-flag here.
4927 */
4928 }
4929 else
4930 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4931
4932 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4933 return VINF_SUCCESS;
4934 }
4935 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4936 }
4937 else
4938 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4939 }
4940 else
4941 {
4942 /*
4943 * An event is being injected or we are in an interrupt shadow.
4944 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4945 * soon as the guest is ready to accept it.
4946 */
4947 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4948 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4949 else
4950 {
4951 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4952 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4953 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4954 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4955 else
4956 {
4957 /* It's possible that interrupt-window exiting is still active, clear it as it's now unnecessary. */
4958 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4959 }
4960 }
4961 }
4962
4963 return VINF_SUCCESS;
4964}
4965
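
/*
 * Illustrative sketch (disabled, not part of the original code): the event priority applied
 * by vmxHCEvaluatePendingEvent() above, condensed into a single decision. SMIs are not
 * handled, matching the todo in the real code; all names below are made up.
 */
#if 0
typedef enum EXAMPLEEVENTKIND
{
    kExampleEvent_None = 0,
    kExampleEvent_Nmi,
    kExampleEvent_ExtInt
} EXAMPLEEVENTKIND;

static EXAMPLEEVENTKIND examplePickEvent(bool fInIntShadow, bool fNmiPending, bool fNmiBlocked,
                                         bool fIntPending, bool fIfSet)
{
    if (fInIntShadow)                   /* The interrupt shadow defers both NMIs and interrupts here. */
        return kExampleEvent_None;
    if (fNmiPending && !fNmiBlocked)    /* NMIs take priority over external interrupts. */
        return kExampleEvent_Nmi;
    if (fIntPending && fIfSet)          /* External interrupts additionally require RFLAGS.IF = 1. */
        return kExampleEvent_ExtInt;
    return kExampleEvent_None;
}
#endif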
4966
4967#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4968/**
4969 * Evaluates the event to be delivered to the nested-guest and sets it as the
4970 * pending event.
4971 *
4972 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4973 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4974 * NOT restore these force-flags.
4975 *
4976 * @returns Strict VBox status code (i.e. informational status codes too).
4977 * @param pVCpu The cross context virtual CPU structure.
4978 * @param pVmcsInfo The VMCS information structure.
4979 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4980 * state.
4981 *
4982 * @remarks The guest must be in VMX non-root mode.
4983 */
4984static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4985{
4986 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4987
4988 Assert(pfIntrState);
4989 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
4990 Assert(!TRPMHasTrap(pVCpu));
4991
4992 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4993
4994 /*
4995 * If we are injecting an event, all necessary checks have been performed.
4996 * Any interrupt-window or NMI-window exiting would have been set up by the
4997 * nested-guest while we merged controls.
4998 */
4999 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5000 return VINF_SUCCESS;
5001
5002 /*
5003 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5004 * made pending (TRPM to HM event) and would be handled above if we resumed
5005 * execution in HM. If somehow we fell back to emulation after the
5006 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5007 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5008 * intercepts should be active and any events pending here have been generated
5009 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5010 */
5011 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5012
5013 /*
5014 * Interrupt shadows MAY block NMIs.
5015 * They also block external interrupts and MAY block external-interrupt VM-exits.
5016 *
5017 * See Intel spec. 24.4.2 "Guest Non-Register State".
5018 * See Intel spec. 25.4.1 "Event Blocking".
5019 */
5020 if (!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
5021 { /* likely */ }
5022 else
5023 return VINF_SUCCESS;
5024
5025 /** @todo SMI. SMIs take priority over NMIs. */
5026
5027 /*
5028 * NMIs.
5029 * NMIs take priority over interrupts.
5030 */
5031 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5032 {
5033 /*
5034 * Nested-guest NMI-window exiting.
5035 * The NMI-window exit must happen regardless of whether an NMI is pending
5036 * provided virtual-NMI blocking is not in effect.
5037 *
5038 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5039 */
5040 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5041 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5042 {
5043 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5044 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5045 }
5046
5047 /*
5048 * For a nested-guest, the FF always indicates the outer guest's ability to
5049 * receive an NMI while the guest-interruptibility state bit depends on whether
5050 * the nested-hypervisor is using virtual-NMIs.
5051 *
5052 * It is very important that we also clear the force-flag if we are causing
5053 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5054 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5055 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5056 */
5057 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5058 {
5059 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5060 return IEMExecVmxVmexitXcptNmi(pVCpu);
5061 vmxHCSetPendingXcptNmi(pVCpu);
5062 return VINF_SUCCESS;
5063 }
5064 }
5065
5066 /*
5067 * Nested-guest interrupt-window exiting.
5068 *
5069 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5070 * provided virtual interrupts are enabled.
5071 *
5072 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5073 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5074 */
5075 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5076 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5077 {
5078 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5079 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5080 }
5081
5082 /*
5083 * External interrupts (PIC/APIC).
5084 *
5085 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5086 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5087 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5088 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5089 *
5090 * NMIs block external interrupts as they are dispatched through the interrupt gate (vector 2)
5091 * which automatically clears EFLAGS.IF. Also it's possible an NMI handler could enable interrupts
5092 * and thus we should not check for NMI inhibition here.
5093 *
5094 * See Intel spec. 25.4.1 "Event Blocking".
5095 * See Intel spec. 6.8.1 "Masking Maskable Hardware Interrupts".
5096 */
5097 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5098 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5099 {
5100 Assert(!DBGFIsStepping(pVCpu));
5101 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
5102 AssertRC(rc);
5103 if (CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5104 {
5105 /* Nested-guest external interrupt VM-exit. */
5106 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5107 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5108 {
5109 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5110 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5111 return rcStrict;
5112 }
5113
5114 /*
5115 * Fetch the external interrupt from the interrupt controller.
5116 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5117 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5118 */
5119 uint8_t u8Interrupt;
5120 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5121 if (RT_SUCCESS(rc))
5122 {
5123 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5124 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5125 {
5126 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5127 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5128 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5129 return rcStrict;
5130 }
5131 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5132 return VINF_SUCCESS;
5133 }
5134 }
5135 }
5136 return VINF_SUCCESS;
5137}
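
/*
 * Illustrative sketch (disabled, not part of the original code): the three-way decision the
 * nested-guest path above makes for a pending external interrupt, based on the nested
 * hypervisor's "external-interrupt exiting" and "acknowledge interrupt on exit" controls.
 * The names below are made up for illustration.
 */
# if 0
typedef enum EXAMPLEEXTINTACTION
{
    kExampleExtInt_ExitNoVector = 1,    /* VM-exit to the nested-hypervisor without acknowledging the vector. */
    kExampleExtInt_ExitWithVector,      /* Acknowledge the interrupt first, then VM-exit with the vector. */
    kExampleExtInt_DeliverToGuest       /* No intercept: inject into the nested-guest as usual. */
} EXAMPLEEXTINTACTION;

static EXAMPLEEXTINTACTION exampleNestedExtIntAction(bool fExtIntExiting, bool fAckIntOnExit)
{
    if (!fExtIntExiting)
        return kExampleExtInt_DeliverToGuest;
    return fAckIntOnExit ? kExampleExtInt_ExitWithVector : kExampleExtInt_ExitNoVector;
}
# endif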
5138#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5139
5140
5141/**
5142 * Injects any pending events into the guest if the guest is in a state to
5143 * receive them.
5144 *
5145 * @returns Strict VBox status code (i.e. informational status codes too).
5146 * @param pVCpu The cross context virtual CPU structure.
5147 * @param pVmcsInfo The VMCS information structure.
5148 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5149 * @param fIntrState The VT-x guest-interruptibility state.
5150 * @param fStepping Whether we are single-stepping the guest using the
5151 * hypervisor debugger and should return
5152 * VINF_EM_DBG_STEPPED if the event was dispatched
5153 * directly.
5154 */
5155static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5156 uint32_t fIntrState, bool fStepping)
5157{
5158 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5159#ifndef IN_NEM_DARWIN
5160 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5161#endif
5162
5163#ifdef VBOX_STRICT
5164 /*
5165 * Verify guest-interruptibility state.
5166 *
5167 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5168 * since injecting an event may modify the interruptibility state and we must thus always
5169 * use fIntrState.
5170 */
5171 {
5172 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5173 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5174 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5175 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5176 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5177 Assert(!TRPMHasTrap(pVCpu));
5178 NOREF(fBlockMovSS); NOREF(fBlockSti);
5179 }
5180#endif
5181
5182 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5183 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5184 {
5185 /*
5186 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5187 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5188 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5189 *
5190 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5191 */
5192 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5193#ifdef VBOX_STRICT
5194 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5195 {
5196 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5197 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5198 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5199 }
5200 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5201 {
5202 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5203 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5204 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5205 }
5206#endif
5207 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5208 uIntType));
5209
5210 /*
5211 * Inject the event and get any changes to the guest-interruptibility state.
5212 *
5213 * The guest-interruptibility state may need to be updated if we inject the event
5214 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5215 */
5216 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5217 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5218
5219 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5220 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5221 else
5222 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5223 }
5224
5225 /*
5226 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5227 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5228 */
5229 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5230 && !fIsNestedGuest)
5231 {
5232 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5233
5234 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5235 {
5236 /*
5237 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5238 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5239 */
5240 Assert(!DBGFIsStepping(pVCpu));
5241 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5242 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5243 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5244 AssertRC(rc);
5245 }
5246 else
5247 {
5248 /*
5249 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5250 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5251 * we take care of this case in vmxHCExportSharedDebugState and also the case where
5252 * we use MTF, so just make sure it's called before executing guest-code.
5253 */
5254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5255 }
5256 }
5257 /* else: for nested-guests this is currently handled while merging controls. */
5258
5259 /*
5260 * Finally, update the guest-interruptibility state.
5261 *
5262 * This is required for the real-on-v86 software interrupt injection, for
5263 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5264 */
5265 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5266 AssertRC(rc);
5267
5268 /*
5269 * There's no need to clear the VM-entry interruption-information field here if we're not
5270 * injecting anything. VT-x clears the valid bit on every VM-exit.
5271 *
5272 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5273 */
5274
5275 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5276 return rcStrict;
5277}
5278
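
/*
 * For reference (illustration only): the guest interruptibility-state bits tested throughout
 * this file, per the Intel SDM layout of that VMCS field. The real code uses the
 * VMX_VMCS_GUEST_INT_STATE_* macros; the names below are made up.
 */
#if 0
# define EXAMPLE_INT_STATE_BLOCK_STI    RT_BIT_32(0)   /* Blocking by STI. */
# define EXAMPLE_INT_STATE_BLOCK_MOVSS  RT_BIT_32(1)   /* Blocking by MOV SS / POP SS. */
# define EXAMPLE_INT_STATE_BLOCK_SMI    RT_BIT_32(2)   /* Blocking by SMI. */
# define EXAMPLE_INT_STATE_BLOCK_NMI    RT_BIT_32(3)   /* Blocking by NMI. */
#endif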
5279
5280/**
5281 * Tries to determine what part of the guest-state VT-x has deemed invalid
5282 * and updates the error record fields accordingly.
5283 *
5284 * @returns VMX_IGS_* error codes.
5285 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5286 * wrong with the guest state.
5287 *
5288 * @param pVCpu The cross context virtual CPU structure.
5289 * @param pVmcsInfo The VMCS info. object.
5290 *
5291 * @remarks This function assumes our cache of the VMCS controls
5292 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5293 */
5294static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5295{
5296#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5297#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5298
5299 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5300 uint32_t uError = VMX_IGS_ERROR;
5301 uint32_t u32IntrState = 0;
5302#ifndef IN_NEM_DARWIN
5303 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5304 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5305#else
5306 bool const fUnrestrictedGuest = true;
5307#endif
5308 do
5309 {
5310 int rc;
5311
5312 /*
5313 * Guest-interruptibility state.
5314 *
5315 * Read this first so that any check failing before the ones that actually
5316 * require the guest-interruptibility state still reflects the correct
5317 * VMCS value, avoiding further confusion.
5318 */
5319 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5320 AssertRC(rc);
5321
5322 uint32_t u32Val;
5323 uint64_t u64Val;
5324
5325 /*
5326 * CR0.
5327 */
5328 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5329 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5330 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5331 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5332 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5333 if (fUnrestrictedGuest)
5334 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5335
5336 uint64_t u64GuestCr0;
5337 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5338 AssertRC(rc);
5339 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5340 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5341 if ( !fUnrestrictedGuest
5342 && (u64GuestCr0 & X86_CR0_PG)
5343 && !(u64GuestCr0 & X86_CR0_PE))
5344 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5345
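        /*
         * Worked example (illustration only, hypothetical MSR values): a CR0 bit must be 1 if it
         * reads as 1 in IA32_VMX_CR0_FIXED0, and must be 0 if it reads as 0 in IA32_VMX_CR0_FIXED1.
         * With Fixed0 = 0x80000021 and Fixed1 = 0xffffffff:
         *   fSetCr0 = Fixed0 & Fixed1 = 0x80000021  -> PG, NE and PE must be 1,
         *   fZapCr0 = Fixed0 | Fixed1 = 0xffffffff  -> no bit is forced to 0,
         * and the checks above then require (cr0 & fSetCr0) == fSetCr0 and (cr0 & ~fZapCr0) == 0.
         */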
5346 /*
5347 * CR4.
5348 */
5349 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5350 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5351 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5352
5353 uint64_t u64GuestCr4;
5354 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5355 AssertRC(rc);
5356 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5357 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5358
5359 /*
5360 * IA32_DEBUGCTL MSR.
5361 */
5362 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5363 AssertRC(rc);
5364 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5365 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5366 {
5367 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5368 }
5369 uint64_t u64DebugCtlMsr = u64Val;
5370
5371#ifdef VBOX_STRICT
5372 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5373 AssertRC(rc);
5374 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5375#endif
5376 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5377
5378 /*
5379 * RIP and RFLAGS.
5380 */
5381 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5382 AssertRC(rc);
5383 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5384 if ( !fLongModeGuest
5385 || !pCtx->cs.Attr.n.u1Long)
5386 {
5387 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5388 }
5389 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5390 * must be identical if the "IA-32e mode guest" VM-entry
5391 * control is 1 and CS.L is 1. No check applies if the
5392 * CPU supports 64 linear-address bits. */
5393
5394 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5395 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5396 AssertRC(rc);
5397 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5 and 3 MBZ. */
5398 VMX_IGS_RFLAGS_RESERVED);
5399 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5400 uint32_t const u32Eflags = u64Val;
5401
5402 if ( fLongModeGuest
5403 || ( fUnrestrictedGuest
5404 && !(u64GuestCr0 & X86_CR0_PE)))
5405 {
5406 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5407 }
5408
5409 uint32_t u32EntryInfo;
5410 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5411 AssertRC(rc);
5412 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5413 {
5414 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5415 }
5416
5417 /*
5418 * 64-bit checks.
5419 */
5420 if (fLongModeGuest)
5421 {
5422 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5423 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5424 }
5425
5426 if ( !fLongModeGuest
5427 && (u64GuestCr4 & X86_CR4_PCIDE))
5428 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5429
5430 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5431 * 51:32 beyond the processor's physical-address width are 0. */
5432
5433 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5434 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5435 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5436
5437#ifndef IN_NEM_DARWIN
5438 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5439 AssertRC(rc);
5440 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5441
5442 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5443 AssertRC(rc);
5444 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5445#endif
5446
5447 /*
5448 * PERF_GLOBAL MSR.
5449 */
5450 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5451 {
5452 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5453 AssertRC(rc);
5454 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5455 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5456 }
5457
5458 /*
5459 * PAT MSR.
5460 */
5461 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5462 {
5463 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5464 AssertRC(rc);
5465 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5466 for (unsigned i = 0; i < 8; i++)
5467 {
5468 uint8_t u8Val = (u64Val & 0xff);
5469 if ( u8Val > MSR_IA32_PAT_MT_UCD
5470 || u8Val == MSR_IA32_PAT_MT_RSVD_2
5471 || u8Val == MSR_IA32_PAT_MT_RSVD_3)
5472 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5473 u64Val >>= 8;
5474 }
5475 }
5476
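        /*
         * For reference (illustration only): each of the eight IA32_PAT bytes must encode one of
         * the architectural memory types 0 (UC), 1 (WC), 4 (WT), 5 (WP), 6 (WB) or 7 (UC-);
         * the values 2 and 3 are reserved, which is what the loop above rejects.
         */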
5477 /*
5478 * EFER MSR.
5479 */
5480 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5481 {
5482 Assert(g_fHmVmxSupportsVmcsEfer);
5483 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5484 AssertRC(rc);
5485 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5486 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5487 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5488 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5489 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5490 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5491 * iemVmxVmentryCheckGuestState(). */
5492 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5493 || !(u64GuestCr0 & X86_CR0_PG)
5494 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5495 VMX_IGS_EFER_LMA_LME_MISMATCH);
5496 }
5497
5498 /*
5499 * Segment registers.
5500 */
5501 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5502 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5503 if (!(u32Eflags & X86_EFL_VM))
5504 {
5505 /* CS */
5506 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5507 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5508 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5509 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5510 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5511 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5512 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5513 /* CS cannot be loaded with NULL in protected mode. */
5514 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5515 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5516 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5517 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5518 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5519 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5520 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5521 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5522 else
5523 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5524
5525 /* SS */
5526 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5527 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5528 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5529 if ( !(pCtx->cr0 & X86_CR0_PE)
5530 || pCtx->cs.Attr.n.u4Type == 3)
5531 {
5532 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5533 }
5534
5535 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5536 {
5537 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5538 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5539 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5540 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5541 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5542 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5543 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5544 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5545 }
5546
5547 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5548 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5549 {
5550 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5551 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5552 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5553 || pCtx->ds.Attr.n.u4Type > 11
5554 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5555 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5556 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5557 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5558 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5559 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5560 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5561 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5562 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5563 }
5564 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5565 {
5566 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5567 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5568 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5569 || pCtx->es.Attr.n.u4Type > 11
5570 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5571 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5572 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5573 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5574 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5575 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5576 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5577 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5578 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5579 }
5580 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5581 {
5582 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5583 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5584 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5585 || pCtx->fs.Attr.n.u4Type > 11
5586 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5587 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5588 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5589 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5590 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5591 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5592 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5593 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5594 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5595 }
5596 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5597 {
5598 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5599 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5600 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5601 || pCtx->gs.Attr.n.u4Type > 11
5602 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5603 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5604 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5605 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5606 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5607 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5608 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5609 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5610 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5611 }
5612 /* 64-bit capable CPUs. */
5613 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5614 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5615 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5616 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5617 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5618 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5619 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5620 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5621 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5622 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5623 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5624 }
5625 else
5626 {
5627 /* V86 mode checks. */
5628 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5629 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5630 {
5631 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5632 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5633 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5634 }
5635 else
5636 {
5637 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5638 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5639 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5640 }
5641
5642 /* CS */
5643 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5644 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5645 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5646 /* SS */
5647 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5648 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5649 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5650 /* DS */
5651 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5652 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5653 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5654 /* ES */
5655 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5656 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5657 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5658 /* FS */
5659 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5660 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5661 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5662 /* GS */
5663 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5664 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5665 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5666 /* 64-bit capable CPUs. */
5667 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5668 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5669 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5670 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5671 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5672 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5673 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5674 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5675 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5676 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5677 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5678 }
5679
5680 /*
5681 * TR.
5682 */
5683 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5684 /* 64-bit capable CPUs. */
5685 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5686 if (fLongModeGuest)
5687 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5688 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5689 else
5690 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5691 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5692 VMX_IGS_TR_ATTR_TYPE_INVALID);
5693 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5694 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5695 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5696 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5697 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5698 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5699 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5700 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5701
5702 /*
5703 * GDTR and IDTR (64-bit capable checks).
5704 */
5705 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5706 AssertRC(rc);
5707 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5708
5709 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5710 AssertRC(rc);
5711 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5712
5713 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5714 AssertRC(rc);
5715 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5716
5717 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5718 AssertRC(rc);
5719 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5720
5721 /*
5722 * Guest Non-Register State.
5723 */
5724 /* Activity State. */
5725 uint32_t u32ActivityState;
5726 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5727 AssertRC(rc);
5728 HMVMX_CHECK_BREAK( !u32ActivityState
5729 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5730 VMX_IGS_ACTIVITY_STATE_INVALID);
5731 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5732 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5733
5734 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5735 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5736 {
5737 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5738 }
5739
5740 /** @todo Activity state and injecting interrupts. Left as a todo since we
5741 * currently don't use activity states but ACTIVE. */
5742
5743 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5744 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5745
5746 /* Guest interruptibility-state. */
5747 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5748 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5749 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5750 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5751 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5752 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5753 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5754 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5755 {
5756 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5757 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5758 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5759 }
5760 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5761 {
5762 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5763 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5764 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5765 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5766 }
5767 /** @todo Assumes the processor is not in SMM. */
5768 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5769 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5770 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5771 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5772 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5773 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5774 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5775 {
5776 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5777 }
5778
5779 /* Pending debug exceptions. */
5780 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5781 AssertRC(rc);
5782 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5783 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5784 u32Val = u64Val; /* For pending debug exceptions checks below. */
5785
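        /* Note on the checks below: when interrupts are blocked by STI or MOV SS, or the activity
           state is HLT, the pending debug-exception BS bit must be set exactly when single-stepping
           is armed (EFLAGS.TF=1 and IA32_DEBUGCTL.BTF=0); the two checks verify both directions of
           that equivalence. */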
5786 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5787 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5788 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5789 {
5790 if ( (u32Eflags & X86_EFL_TF)
5791 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5792 {
5793 /* Bit 14 is PendingDebug.BS. */
5794 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5795 }
5796 if ( !(u32Eflags & X86_EFL_TF)
5797 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5798 {
5799 /* Bit 14 is PendingDebug.BS. */
5800 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5801 }
5802 }
5803
5804#ifndef IN_NEM_DARWIN
5805 /* VMCS link pointer. */
5806 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5807 AssertRC(rc);
5808 if (u64Val != UINT64_C(0xffffffffffffffff))
5809 {
5810 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5811 /** @todo Bits beyond the processor's physical-address width MBZ. */
5812 /** @todo SMM checks. */
5813 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5814 Assert(pVmcsInfo->pvShadowVmcs);
5815 VMXVMCSREVID VmcsRevId;
5816 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5817 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5818 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5819 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5820 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5821 }
5822
5823 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5824 * not using nested paging? */
5825 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5826 && !fLongModeGuest
5827 && CPUMIsGuestInPAEModeEx(pCtx))
5828 {
5829 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5830 AssertRC(rc);
5831 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5832
5833 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5834 AssertRC(rc);
5835 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5836
5837 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5838 AssertRC(rc);
5839 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5840
5841 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5842 AssertRC(rc);
5843 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5844 }
5845#endif
5846
5847 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5848 if (uError == VMX_IGS_ERROR)
5849 uError = VMX_IGS_REASON_NOT_FOUND;
5850 } while (0);
5851
5852 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5853 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5854 return uError;
5855
5856#undef HMVMX_ERROR_BREAK
5857#undef HMVMX_CHECK_BREAK
5858}
5859
5860
5861#ifndef HMVMX_USE_FUNCTION_TABLE
5862/**
5863 * Handles a guest VM-exit from hardware-assisted VMX execution.
5864 *
5865 * @returns Strict VBox status code (i.e. informational status codes too).
5866 * @param pVCpu The cross context virtual CPU structure.
5867 * @param pVmxTransient The VMX-transient structure.
5868 */
5869DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5870{
5871#ifdef DEBUG_ramshankar
5872# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5873 do { \
5874 if (a_fSave != 0) \
5875 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5876 VBOXSTRICTRC rcStrict = a_CallExpr; \
5877 if (a_fSave != 0) \
5878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5879 return rcStrict; \
5880 } while (0)
5881#else
5882# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5883#endif
5884 uint32_t const uExitReason = pVmxTransient->uExitReason;
5885 switch (uExitReason)
5886 {
5887 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5888 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5889 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5890 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5891 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5892 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5893 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5894 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5895 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5896 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5897 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5898 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5899 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5900 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5901 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5902 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5903 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5904 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5905 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5906 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5907 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5908 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5909 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5910 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5911 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5912 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5913 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5914 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5915 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5916 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5917#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5918 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5919 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5920 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5921 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5922 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
 5923         case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
 5924         case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5925 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5926 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5927 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5928#else
5929 case VMX_EXIT_VMCLEAR:
5930 case VMX_EXIT_VMLAUNCH:
5931 case VMX_EXIT_VMPTRLD:
5932 case VMX_EXIT_VMPTRST:
5933 case VMX_EXIT_VMREAD:
5934 case VMX_EXIT_VMRESUME:
5935 case VMX_EXIT_VMWRITE:
5936 case VMX_EXIT_VMXOFF:
5937 case VMX_EXIT_VMXON:
5938 case VMX_EXIT_INVVPID:
5939 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5940#endif
5941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5942 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5943#else
5944 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5945#endif
5946
5947 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5948 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5949 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5950
5951 case VMX_EXIT_INIT_SIGNAL:
5952 case VMX_EXIT_SIPI:
5953 case VMX_EXIT_IO_SMI:
5954 case VMX_EXIT_SMI:
5955 case VMX_EXIT_ERR_MSR_LOAD:
5956 case VMX_EXIT_ERR_MACHINE_CHECK:
5957 case VMX_EXIT_PML_FULL:
5958 case VMX_EXIT_VIRTUALIZED_EOI:
5959 case VMX_EXIT_GDTR_IDTR_ACCESS:
5960 case VMX_EXIT_LDTR_TR_ACCESS:
5961 case VMX_EXIT_APIC_WRITE:
5962 case VMX_EXIT_RDRAND:
5963 case VMX_EXIT_RSM:
5964 case VMX_EXIT_VMFUNC:
5965 case VMX_EXIT_ENCLS:
5966 case VMX_EXIT_RDSEED:
5967 case VMX_EXIT_XSAVES:
5968 case VMX_EXIT_XRSTORS:
5969 case VMX_EXIT_UMWAIT:
5970 case VMX_EXIT_TPAUSE:
5971 case VMX_EXIT_LOADIWKEY:
5972 default:
5973 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5974 }
5975#undef VMEXIT_CALL_RET
5976}
5977#endif /* !HMVMX_USE_FUNCTION_TABLE */
5978
5979
5980#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5981/**
5982 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5983 *
5984 * @returns Strict VBox status code (i.e. informational status codes too).
5985 * @param pVCpu The cross context virtual CPU structure.
5986 * @param pVmxTransient The VMX-transient structure.
5987 */
5988DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5989{
5990#ifdef DEBUG_ramshankar
5991# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5992 do { \
5993 if (a_fSave != 0) \
5994 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5995 VBOXSTRICTRC rcStrict = a_CallExpr; \
5996 return rcStrict; \
5997 } while (0)
5998#else
5999# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6000#endif
6001
6002 uint32_t const uExitReason = pVmxTransient->uExitReason;
6003 switch (uExitReason)
6004 {
6005# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6006 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6007 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6008# else
6009 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6010 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6011# endif
6012 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6013 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6014 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6015
6016 /*
6017 * We shouldn't direct host physical interrupts to the nested-guest.
6018 */
6019 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6020
6021 /*
6022 * Instructions that cause VM-exits unconditionally or the condition is
6023 * always taken solely from the nested hypervisor (meaning if the VM-exit
6024 * happens, it's guaranteed to be a nested-guest VM-exit).
6025 *
6026 * - Provides VM-exit instruction length ONLY.
6027 */
6028 case VMX_EXIT_CPUID: /* Unconditional. */
6029 case VMX_EXIT_VMCALL:
6030 case VMX_EXIT_GETSEC:
6031 case VMX_EXIT_INVD:
6032 case VMX_EXIT_XSETBV:
6033 case VMX_EXIT_VMLAUNCH:
6034 case VMX_EXIT_VMRESUME:
6035 case VMX_EXIT_VMXOFF:
6036 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6037 case VMX_EXIT_VMFUNC:
6038 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6039
6040 /*
6041 * Instructions that cause VM-exits unconditionally or the condition is
6042 * always taken solely from the nested hypervisor (meaning if the VM-exit
6043 * happens, it's guaranteed to be a nested-guest VM-exit).
6044 *
6045 * - Provides VM-exit instruction length.
6046 * - Provides VM-exit information.
6047 * - Optionally provides Exit qualification.
6048 *
6049 * Since Exit qualification is 0 for all VM-exits where it is not
6050 * applicable, reading and passing it to the guest should produce
6051 * defined behavior.
6052 *
6053 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6054 */
6055 case VMX_EXIT_INVEPT: /* Unconditional. */
6056 case VMX_EXIT_INVVPID:
6057 case VMX_EXIT_VMCLEAR:
6058 case VMX_EXIT_VMPTRLD:
6059 case VMX_EXIT_VMPTRST:
6060 case VMX_EXIT_VMXON:
6061 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6062 case VMX_EXIT_LDTR_TR_ACCESS:
6063 case VMX_EXIT_RDRAND:
6064 case VMX_EXIT_RDSEED:
6065 case VMX_EXIT_XSAVES:
6066 case VMX_EXIT_XRSTORS:
6067 case VMX_EXIT_UMWAIT:
6068 case VMX_EXIT_TPAUSE:
6069 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6070
6071 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6072 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6073 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6074 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6075 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6076 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6077 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6078 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6079 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6080 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6081 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6082 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6083 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6084 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6085 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6086 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6087 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6088 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6089 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6090
6091 case VMX_EXIT_PREEMPT_TIMER:
6092 {
6093 /** @todo NSTVMX: Preempt timer. */
6094 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6095 }
6096
6097 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6098 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6099
6100 case VMX_EXIT_VMREAD:
6101 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6102
6103 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6104 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6105
6106 case VMX_EXIT_INIT_SIGNAL:
6107 case VMX_EXIT_SIPI:
6108 case VMX_EXIT_IO_SMI:
6109 case VMX_EXIT_SMI:
6110 case VMX_EXIT_ERR_MSR_LOAD:
6111 case VMX_EXIT_ERR_MACHINE_CHECK:
6112 case VMX_EXIT_PML_FULL:
6113 case VMX_EXIT_RSM:
6114 default:
6115 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6116 }
6117#undef VMEXIT_CALL_RET
6118}
6119#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6120
6121
6122/** @name VM-exit helpers.
6123 * @{
6124 */
6125/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6126/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6127/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6128
6129/** Macro for VM-exits called unexpectedly. */
6130#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6131 do { \
6132 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6133 return VERR_VMX_UNEXPECTED_EXIT; \
6134 } while (0)
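/* Usage sketch (illustrative): an exit handler that cannot legitimately see a particular exit
   typically bails out with something along the lines of
       HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
   recording the offending value in u32HMError and returning VERR_VMX_UNEXPECTED_EXIT. */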
6135
6136#ifdef VBOX_STRICT
6137# ifndef IN_NEM_DARWIN
 6138/* Is there some generic IPRT define for this that is not in Runtime/internal/\*? */
6139# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6140 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6141
6142# define HMVMX_ASSERT_PREEMPT_CPUID() \
6143 do { \
6144 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6145 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6146 } while (0)
6147
6148# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6149 do { \
6150 AssertPtr((a_pVCpu)); \
6151 AssertPtr((a_pVmxTransient)); \
6152 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6153 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6154 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6155 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6156 Assert((a_pVmxTransient)->pVmcsInfo); \
6157 Assert(ASMIntAreEnabled()); \
6158 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6159 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6160 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6161 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6162 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6163 HMVMX_ASSERT_PREEMPT_CPUID(); \
6164 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6165 } while (0)
6166# else
6167# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6168# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6169# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6170 do { \
6171 AssertPtr((a_pVCpu)); \
6172 AssertPtr((a_pVmxTransient)); \
6173 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6174 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6175 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6176 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6177 Assert((a_pVmxTransient)->pVmcsInfo); \
6178 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6179 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6180 } while (0)
6181# endif
6182
6183# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6184 do { \
6185 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6186 Assert((a_pVmxTransient)->fIsNestedGuest); \
6187 } while (0)
6188
6189# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6190 do { \
6191 Log4Func(("\n")); \
6192 } while (0)
6193#else
6194# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6195 do { \
6196 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6197 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6198 } while (0)
6199
6200# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6201 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6202
6203# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6204#endif
6205
6206#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6207/** Macro that does the necessary privilege checks and intercepted VM-exits for
6208 * guests that attempted to execute a VMX instruction. */
6209# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6210 do \
6211 { \
6212 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6213 if (rcStrictTmp == VINF_SUCCESS) \
6214 { /* likely */ } \
6215 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6216 { \
6217 Assert((a_pVCpu)->hm.s.Event.fPending); \
6218 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6219 return VINF_SUCCESS; \
6220 } \
6221 else \
6222 { \
6223 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6224 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6225 } \
6226 } while (0)
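/* Usage sketch (illustrative): a nested VMX-instruction intercept handler typically runs these
   privilege checks before decoding anything else, roughly:
       HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
       HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
   The macro falls through on VINF_SUCCESS, or returns VINF_SUCCESS with a \#UD pending so the
   exception gets delivered on the next VM-entry. */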
6227
 6228/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6229# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6230 do \
6231 { \
6232 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6233 (a_pGCPtrEffAddr)); \
6234 if (rcStrictTmp == VINF_SUCCESS) \
6235 { /* likely */ } \
6236 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6237 { \
6238 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6239 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6240 NOREF(uXcptTmp); \
6241 return VINF_SUCCESS; \
6242 } \
6243 else \
6244 { \
6245 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6246 return rcStrictTmp; \
6247 } \
6248 } while (0)
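/* Usage sketch (illustrative): once the instruction information and Exit qualification have been
   read into the transient structure, a handler can decode the memory operand roughly as:
       RTGCPTR GCPtrOperand;
       HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                                VMXMEMACCESS_READ, &GCPtrOperand);
   On success GCPtrOperand holds the effective guest-linear address of the operand; otherwise the
   macro has already queued an exception or returned the failure status. */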
6249#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6250
6251
6252/**
6253 * Advances the guest RIP by the specified number of bytes.
6254 *
6255 * @param pVCpu The cross context virtual CPU structure.
6256 * @param cbInstr Number of bytes to advance the RIP by.
6257 *
6258 * @remarks No-long-jump zone!!!
6259 */
6260DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6261{
6262 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6263
6264 /*
6265 * Advance RIP.
6266 *
6267 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6268 * when the addition causes a "carry" into the upper half and check whether
 6269     * we're in 64-bit and can go on with it or whether we should zap the top
6270 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6271 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6272 *
6273 * See PC wrap around tests in bs3-cpu-weird-1.
6274 */
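    /* Example: uRipPrev=0x00000000fffffffe with cbInstr=4 gives uRipNext=0x0000000100000002; bit 32
       differs between the two values, so unless the guest is executing 64-bit code the result is
       truncated to 0x00000002 below. */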
6275 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6276 uint64_t const uRipNext = uRipPrev + cbInstr;
6277 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6278 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6279 pVCpu->cpum.GstCtx.rip = uRipNext;
6280 else
6281 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6282
6283 /*
6284 * Clear RF and interrupt shadowing.
6285 */
6286 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6287 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6288 else
6289 {
6290 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6291 {
6292 /** @todo \#DB - single step. */
6293 }
6294 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6295 }
6296 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6297
6298 /* Mark both RIP and RFLAGS as updated. */
6299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6300}
6301
6302
6303/**
6304 * Advances the guest RIP after reading it from the VMCS.
6305 *
6306 * @returns VBox status code, no informational status codes.
6307 * @param pVCpu The cross context virtual CPU structure.
6308 * @param pVmxTransient The VMX-transient structure.
6309 *
6310 * @remarks No-long-jump zone!!!
6311 */
6312static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6313{
6314 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6315 /** @todo consider template here after checking callers. */
6316 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6317 AssertRCReturn(rc, rc);
6318
6319 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6320 return VINF_SUCCESS;
6321}
6322
6323
6324/**
6325 * Handle a condition that occurred while delivering an event through the guest or
6326 * nested-guest IDT.
6327 *
6328 * @returns Strict VBox status code (i.e. informational status codes too).
6329 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6330 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
 6331 *          to continue execution of the guest which will deliver the \#DF.
6332 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6333 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6334 *
6335 * @param pVCpu The cross context virtual CPU structure.
6336 * @param pVmxTransient The VMX-transient structure.
6337 *
6338 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6339 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6340 * is due to an EPT violation, PML full or SPP-related event.
6341 *
6342 * @remarks No-long-jump zone!!!
6343 */
6344static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6345{
6346 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6347 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6348 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6349 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6350 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6351 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6352
6353 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6354 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6355 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6356 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6357 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6358 {
6359 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6360 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6361
6362 /*
6363 * If the event was a software interrupt (generated with INT n) or a software exception
6364 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6365 * can handle the VM-exit and continue guest execution which will re-execute the
6366 * instruction rather than re-injecting the exception, as that can cause premature
6367 * trips to ring-3 before injection and involve TRPM which currently has no way of
6368 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6369 * the problem).
6370 */
6371 IEMXCPTRAISE enmRaise;
6372 IEMXCPTRAISEINFO fRaiseInfo;
6373 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6374 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6375 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6376 {
6377 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6378 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6379 }
6380 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6381 {
6382 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6383 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6384 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6385
6386 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6387 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6388
6389 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6390
6391 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6392 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6393 {
6394 pVmxTransient->fVectoringPF = true;
6395 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6396 }
6397 }
6398 else
6399 {
6400 /*
6401 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6402 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6403 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6404 */
6405 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6406 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6407 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6408 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6409 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6410 }
6411
6412 /*
6413 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6414 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6415 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6416 * subsequent VM-entry would fail, see @bugref{7445}.
6417 *
6418 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6419 */
6420 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6421 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6422 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6423 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6424 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6425
6426 switch (enmRaise)
6427 {
6428 case IEMXCPTRAISE_CURRENT_XCPT:
6429 {
6430 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6431 Assert(rcStrict == VINF_SUCCESS);
6432 break;
6433 }
6434
6435 case IEMXCPTRAISE_PREV_EVENT:
6436 {
6437 uint32_t u32ErrCode;
6438 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6439 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6440 else
6441 u32ErrCode = 0;
6442
6443 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6444 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6445 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6446 pVCpu->cpum.GstCtx.cr2);
6447
6448 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6449 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6450 Assert(rcStrict == VINF_SUCCESS);
6451 break;
6452 }
6453
6454 case IEMXCPTRAISE_REEXEC_INSTR:
6455 Assert(rcStrict == VINF_SUCCESS);
6456 break;
6457
6458 case IEMXCPTRAISE_DOUBLE_FAULT:
6459 {
6460 /*
6461 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6462 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6463 */
6464 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6465 {
6466 pVmxTransient->fVectoringDoublePF = true;
6467 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6468 pVCpu->cpum.GstCtx.cr2));
6469 rcStrict = VINF_SUCCESS;
6470 }
6471 else
6472 {
6473 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6474 vmxHCSetPendingXcptDF(pVCpu);
6475 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6476 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6477 rcStrict = VINF_HM_DOUBLE_FAULT;
6478 }
6479 break;
6480 }
6481
6482 case IEMXCPTRAISE_TRIPLE_FAULT:
6483 {
6484 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6485 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6486 rcStrict = VINF_EM_RESET;
6487 break;
6488 }
6489
6490 case IEMXCPTRAISE_CPU_HANG:
6491 {
6492 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6493 rcStrict = VERR_EM_GUEST_CPU_HANG;
6494 break;
6495 }
6496
6497 default:
6498 {
6499 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6500 rcStrict = VERR_VMX_IPE_2;
6501 break;
6502 }
6503 }
6504 }
6505 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6506 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6507 {
6508 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6509 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6510 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6511 {
6512 /*
 6513             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6514 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6515 * that virtual NMIs remain blocked until the IRET execution is completed.
6516 *
6517 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6518 */
6519 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6520 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6521 }
6522 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6523 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6524 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6525 {
6526 /*
6527 * Execution of IRET caused an EPT violation, page-modification log-full event or
6528 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6529 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6530 * that virtual NMIs remain blocked until the IRET execution is completed.
6531 *
6532 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6533 */
6534 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6535 {
6536 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6537 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6538 }
6539 }
6540 }
6541
6542 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6543 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6544 return rcStrict;
6545}
6546
6547
6548#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6549/**
6550 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6551 * guest attempting to execute a VMX instruction.
6552 *
6553 * @returns Strict VBox status code (i.e. informational status codes too).
6554 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6555 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6556 *
6557 * @param pVCpu The cross context virtual CPU structure.
6558 * @param uExitReason The VM-exit reason.
6559 *
6560 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6561 * @remarks No-long-jump zone!!!
6562 */
6563static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6564{
6565 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6566 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6567
6568 /*
6569 * The physical CPU would have already checked the CPU mode/code segment.
6570 * We shall just assert here for paranoia.
6571 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6572 */
6573 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6574 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6575 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6576
6577 if (uExitReason == VMX_EXIT_VMXON)
6578 {
6579 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6580
6581 /*
6582 * We check CR4.VMXE because it is required to be always set while in VMX operation
6583 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6584 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6585 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6586 */
6587 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6588 {
6589 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6590 vmxHCSetPendingXcptUD(pVCpu);
6591 return VINF_HM_PENDING_XCPT;
6592 }
6593 }
6594 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6595 {
6596 /*
6597 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6598 * (other than VMXON), we need to raise a #UD.
6599 */
6600 Log4Func(("Not in VMX root mode -> #UD\n"));
6601 vmxHCSetPendingXcptUD(pVCpu);
6602 return VINF_HM_PENDING_XCPT;
6603 }
6604
6605 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6606 return VINF_SUCCESS;
6607}
6608
6609
6610/**
6611 * Decodes the memory operand of an instruction that caused a VM-exit.
6612 *
6613 * The Exit qualification field provides the displacement field for memory
6614 * operand instructions, if any.
6615 *
6616 * @returns Strict VBox status code (i.e. informational status codes too).
6617 * @retval VINF_SUCCESS if the operand was successfully decoded.
6618 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6619 * operand.
6620 * @param pVCpu The cross context virtual CPU structure.
6621 * @param uExitInstrInfo The VM-exit instruction information field.
6622 * @param enmMemAccess The memory operand's access type (read or write).
6623 * @param GCPtrDisp The instruction displacement field, if any. For
6624 * RIP-relative addressing pass RIP + displacement here.
6625 * @param pGCPtrMem Where to store the effective destination memory address.
6626 *
6627 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6628 * virtual-8086 mode hence skips those checks while verifying if the
6629 * segment is valid.
6630 */
6631static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6632 PRTGCPTR pGCPtrMem)
6633{
6634 Assert(pGCPtrMem);
6635 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6636 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6637 | CPUMCTX_EXTRN_CR0);
6638
6639 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6640 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6641 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6642
6643 VMXEXITINSTRINFO ExitInstrInfo;
6644 ExitInstrInfo.u = uExitInstrInfo;
6645 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6646 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6647 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6648 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6649 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6650 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6651 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6652 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6653 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6654
6655 /*
6656 * Validate instruction information.
 6657     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6658 */
6659 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6660 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6661 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6662 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6663 AssertLogRelMsgReturn(fIsMemOperand,
6664 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6665
6666 /*
6667 * Compute the complete effective address.
6668 *
6669 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6670 * See AMD spec. 4.5.2 "Segment Registers".
6671 */
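    /* Illustration (hypothetical operand): for something like "vmptrld [rbx + rsi*8 + 0x10]" the
       instruction information would identify RBX as the base register, RSI as the index register
       and a scaling of 3 (i.e. *8), while GCPtrDisp already carries the 0x10 displacement; the
       additions below therefore rebuild the effective address the guest computed.  The segment
       base is applied further down, where in long mode only FS/GS bases are relevant. */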
6672 RTGCPTR GCPtrMem = GCPtrDisp;
6673 if (fBaseRegValid)
6674 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6675 if (fIdxRegValid)
6676 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6677
6678 RTGCPTR const GCPtrOff = GCPtrMem;
6679 if ( !fIsLongMode
6680 || iSegReg >= X86_SREG_FS)
6681 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6682 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6683
6684 /*
6685 * Validate effective address.
6686 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6687 */
6688 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6689 Assert(cbAccess > 0);
6690 if (fIsLongMode)
6691 {
6692 if (X86_IS_CANONICAL(GCPtrMem))
6693 {
6694 *pGCPtrMem = GCPtrMem;
6695 return VINF_SUCCESS;
6696 }
6697
6698 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6699 * "Data Limit Checks in 64-bit Mode". */
6700 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6701 vmxHCSetPendingXcptGP(pVCpu, 0);
6702 return VINF_HM_PENDING_XCPT;
6703 }
6704
6705 /*
6706 * This is a watered down version of iemMemApplySegment().
6707 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6708 * and segment CPL/DPL checks are skipped.
6709 */
6710 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6711 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6712 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6713
6714 /* Check if the segment is present and usable. */
6715 if ( pSel->Attr.n.u1Present
6716 && !pSel->Attr.n.u1Unusable)
6717 {
6718 Assert(pSel->Attr.n.u1DescType);
6719 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6720 {
6721 /* Check permissions for the data segment. */
6722 if ( enmMemAccess == VMXMEMACCESS_WRITE
6723 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6724 {
6725 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6726 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6727 return VINF_HM_PENDING_XCPT;
6728 }
6729
6730 /* Check limits if it's a normal data segment. */
6731 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6732 {
6733 if ( GCPtrFirst32 > pSel->u32Limit
6734 || GCPtrLast32 > pSel->u32Limit)
6735 {
6736 Log4Func(("Data segment limit exceeded. "
6737 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6738 GCPtrLast32, pSel->u32Limit));
6739 if (iSegReg == X86_SREG_SS)
6740 vmxHCSetPendingXcptSS(pVCpu, 0);
6741 else
6742 vmxHCSetPendingXcptGP(pVCpu, 0);
6743 return VINF_HM_PENDING_XCPT;
6744 }
6745 }
6746 else
6747 {
6748 /* Check limits if it's an expand-down data segment.
6749 Note! The upper boundary is defined by the B bit, not the G bit! */
6750 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6751 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6752 {
6753 Log4Func(("Expand-down data segment limit exceeded. "
6754 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6755 GCPtrLast32, pSel->u32Limit));
6756 if (iSegReg == X86_SREG_SS)
6757 vmxHCSetPendingXcptSS(pVCpu, 0);
6758 else
6759 vmxHCSetPendingXcptGP(pVCpu, 0);
6760 return VINF_HM_PENDING_XCPT;
6761 }
6762 }
6763 }
6764 else
6765 {
6766 /* Check permissions for the code segment. */
6767 if ( enmMemAccess == VMXMEMACCESS_WRITE
6768 || ( enmMemAccess == VMXMEMACCESS_READ
6769 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6770 {
6771 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6772 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6773 vmxHCSetPendingXcptGP(pVCpu, 0);
6774 return VINF_HM_PENDING_XCPT;
6775 }
6776
6777 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6778 if ( GCPtrFirst32 > pSel->u32Limit
6779 || GCPtrLast32 > pSel->u32Limit)
6780 {
6781 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6782 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6783 if (iSegReg == X86_SREG_SS)
6784 vmxHCSetPendingXcptSS(pVCpu, 0);
6785 else
6786 vmxHCSetPendingXcptGP(pVCpu, 0);
6787 return VINF_HM_PENDING_XCPT;
6788 }
6789 }
6790 }
6791 else
6792 {
6793 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6794 vmxHCSetPendingXcptGP(pVCpu, 0);
6795 return VINF_HM_PENDING_XCPT;
6796 }
6797
6798 *pGCPtrMem = GCPtrMem;
6799 return VINF_SUCCESS;
6800}
6801#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6802
6803
6804/**
6805 * VM-exit helper for LMSW.
6806 */
6807static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6808{
6809 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6810 AssertRCReturn(rc, rc);
6811
6812 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6813 AssertMsg( rcStrict == VINF_SUCCESS
6814 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6815
6816 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6817 if (rcStrict == VINF_IEM_RAISED_XCPT)
6818 {
6819 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6820 rcStrict = VINF_SUCCESS;
6821 }
6822
6823 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6824 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6825 return rcStrict;
6826}
6827
6828
6829/**
6830 * VM-exit helper for CLTS.
6831 */
6832static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6833{
6834 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6835 AssertRCReturn(rc, rc);
6836
6837 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6838 AssertMsg( rcStrict == VINF_SUCCESS
6839 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6840
6841 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6842 if (rcStrict == VINF_IEM_RAISED_XCPT)
6843 {
6844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6845 rcStrict = VINF_SUCCESS;
6846 }
6847
6848 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6849 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6850 return rcStrict;
6851}
6852
6853
6854/**
6855 * VM-exit helper for MOV from CRx (CRx read).
6856 */
6857static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6858{
6859 Assert(iCrReg < 16);
6860 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6861
6862 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6863 AssertRCReturn(rc, rc);
6864
6865 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6866 AssertMsg( rcStrict == VINF_SUCCESS
6867 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6868
6869 if (iGReg == X86_GREG_xSP)
6870 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6871 else
6872 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6873#ifdef VBOX_WITH_STATISTICS
6874 switch (iCrReg)
6875 {
6876 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6877 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6878 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6879 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6880 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6881 }
6882#endif
6883 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6884 return rcStrict;
6885}
6886
6887
6888/**
6889 * VM-exit helper for MOV to CRx (CRx write).
6890 */
6891static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6892{
6893 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6894
6895 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6896 AssertMsg( rcStrict == VINF_SUCCESS
6897 || rcStrict == VINF_IEM_RAISED_XCPT
6898 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6899
6900 switch (iCrReg)
6901 {
6902 case 0:
6903 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6904 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6905 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6906 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6907 break;
6908
6909 case 2:
6910 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
 6911             /* Nothing to do here; CR2 is not part of the VMCS. */
6912 break;
6913
6914 case 3:
6915 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6916 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6917 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6918 break;
6919
6920 case 4:
6921 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6922 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6923#ifndef IN_NEM_DARWIN
6924 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6925 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6926#else
6927 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6928#endif
6929 break;
6930
6931 case 8:
6932 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6933 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6934 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6935 break;
6936
6937 default:
6938 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6939 break;
6940 }
6941
6942 if (rcStrict == VINF_IEM_RAISED_XCPT)
6943 {
6944 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6945 rcStrict = VINF_SUCCESS;
6946 }
6947 return rcStrict;
6948}
6949
6950
6951/**
6952 * VM-exit exception handler for \#PF (Page-fault exception).
6953 *
6954 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6955 */
6956static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6957{
6958 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6959 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6960
6961#ifndef IN_NEM_DARWIN
6962 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6963 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6964 { /* likely */ }
6965 else
6966#endif
6967 {
6968#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6969 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6970#endif
6971 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6972 if (!pVmxTransient->fVectoringDoublePF)
6973 {
6974 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6975 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6976 }
6977 else
6978 {
6979 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6980 Assert(!pVmxTransient->fIsNestedGuest);
6981 vmxHCSetPendingXcptDF(pVCpu);
6982 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6983 }
6984 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6985 return VINF_SUCCESS;
6986 }
6987
6988 Assert(!pVmxTransient->fIsNestedGuest);
6989
 6990     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6991 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6992 if (pVmxTransient->fVectoringPF)
6993 {
6994 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6995 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6996 }
6997
6998 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6999 AssertRCReturn(rc, rc);
7000
7001 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
7002 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
7003
7004 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
7005 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
7006
7007 Log4Func(("#PF: rc=%Rrc\n", rc));
7008 if (rc == VINF_SUCCESS)
7009 {
7010 /*
7011 * This is typically a shadow page table sync or a MMIO instruction. But we may have
7012 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
7013 */
7014 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7015 TRPMResetTrap(pVCpu);
7016 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
7017 return rc;
7018 }
7019
7020 if (rc == VINF_EM_RAW_GUEST_TRAP)
7021 {
7022 if (!pVmxTransient->fVectoringDoublePF)
7023 {
7024 /* It's a guest page fault and needs to be reflected to the guest. */
7025 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
7026 TRPMResetTrap(pVCpu);
7027 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
7028 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7029 uGstErrorCode, pVmxTransient->uExitQual);
7030 }
7031 else
7032 {
7033 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7034 TRPMResetTrap(pVCpu);
7035 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
7036 vmxHCSetPendingXcptDF(pVCpu);
7037 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7038 }
7039
7040 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7041 return VINF_SUCCESS;
7042 }
7043
7044 TRPMResetTrap(pVCpu);
7045 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
7046 return rc;
7047}
7048
7049
7050/**
7051 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7052 *
7053 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7054 */
7055static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7056{
7057 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7058 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7059
7060 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7061 AssertRCReturn(rc, rc);
7062
7063 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7064 {
7065 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7066 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7067
7068 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7069 * provides VM-exit instruction length. If this causes problem later,
7070 * disassemble the instruction like it's done on AMD-V. */
7071 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7072 AssertRCReturn(rc2, rc2);
7073 return rc;
7074 }
7075
7076 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7077 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7078 return VINF_SUCCESS;
7079}
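
/*
 * Illustrative sketch, not part of this file: the CR0.NE decision taken in
 * vmxHCExitXcptMF() above. With CR0.NE clear, x87 errors are reported MS-DOS
 * style via the external FERR# pin, which the (virtual) chipset surfaces as
 * IRQ 13; with CR0.NE set the native #MF exception is delivered instead.
 * The helper and macro names below are made up for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_X86_CR0_NE_BIT   5   /* CR0.NE (Numeric Error) is bit 5. */

static inline bool sketchX87ErrorUsesFerrIrq13(uint64_t uGuestCr0)
{
    /* Legacy FERR#/IRQ 13 reporting is used only when CR0.NE is clear. */
    return !(uGuestCr0 & (UINT64_C(1) << SKETCH_X86_CR0_NE_BIT));
}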
7080
7081
7082/**
7083 * VM-exit exception handler for \#BP (Breakpoint exception).
7084 *
7085 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7086 */
7087static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7088{
7089 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7090 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7091
7092 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7093 AssertRCReturn(rc, rc);
7094
7095 VBOXSTRICTRC rcStrict;
7096 if (!pVmxTransient->fIsNestedGuest)
7097 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7098 else
7099 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7100
7101 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7102 {
7103 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7104 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7105 rcStrict = VINF_SUCCESS;
7106 }
7107
7108 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7109 return rcStrict;
7110}
7111
7112
7113/**
7114 * VM-exit exception handler for \#AC (Alignment-check exception).
7115 *
7116 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7117 */
7118static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7119{
7120 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7121
7122 /*
7123 * Detect #ACs caused by host having enabled split-lock detection.
7124 * Emulate such instructions.
7125 */
7126#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7127 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7128 AssertRCReturn(rc, rc);
7129 /** @todo detect split lock in cpu feature? */
7130 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7131 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7132 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7133 || CPUMGetGuestCPL(pVCpu) != 3
7134         /* 3. When EFLAGS.AC is zero this can only be a split-lock case. */
7135 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7136 {
7137 /*
7138 * Check for debug/trace events and import state accordingly.
7139 */
7140 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7141 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7142 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7143#ifndef IN_NEM_DARWIN
7144 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7145#endif
7146 )
7147 {
7148 if (pVM->cCpus == 1)
7149 {
7150#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7151 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7152 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7153#else
7154 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7155 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7156#endif
7157 AssertRCReturn(rc, rc);
7158 }
7159 }
7160 else
7161 {
7162 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7163 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7164 AssertRCReturn(rc, rc);
7165
7166 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7167
7168 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7169 {
7170 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7171 if (rcStrict != VINF_SUCCESS)
7172 return rcStrict;
7173 }
7174 }
7175
7176 /*
7177 * Emulate the instruction.
7178 *
7179 * We have to ignore the LOCK prefix here as we must not retrigger the
7180 * detection on the host. This isn't all that satisfactory, though...
7181 */
7182 if (pVM->cCpus == 1)
7183 {
7184 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7185 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7186
7187 /** @todo For SMP configs we should do a rendezvous here. */
7188 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7189 if (rcStrict == VINF_SUCCESS)
7190#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7191 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7192 HM_CHANGED_GUEST_RIP
7193 | HM_CHANGED_GUEST_RFLAGS
7194 | HM_CHANGED_GUEST_GPRS_MASK
7195 | HM_CHANGED_GUEST_CS
7196 | HM_CHANGED_GUEST_SS);
7197#else
7198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7199#endif
7200 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7201 {
7202 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7203 rcStrict = VINF_SUCCESS;
7204 }
7205 return rcStrict;
7206 }
7207 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7208 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7209 return VINF_EM_EMULATE_SPLIT_LOCK;
7210 }
7211
7212 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7213 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7214 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7215
7216 /* Re-inject it. We'll detect any nesting before getting here. */
7217 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7218 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7219 return VINF_SUCCESS;
7220}
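
/*
 * Illustrative sketch, not part of this file: the kind of guest-mode access
 * that trips host split-lock detection and lands in vmxHCExitXcptAC() above.
 * A LOCK'ed read-modify-write whose operand straddles a cache-line boundary
 * raises #AC regardless of CPL, CR0.AM and EFLAGS.AC, which is exactly what
 * the checks above exploit to tell it apart from a 486-style alignment check.
 * Assumes GCC/Clang __atomic builtins; the misaligned atomic is deliberate
 * and the function name is made up for illustration.
 */
#include <stdint.h>

static void sketchTriggerSplitLock(void)
{
    /* 128 bytes aligned to a 64-byte cache line. */
    alignas(64) static uint8_t s_abBuf[128];
    /* Point 2 bytes before the boundary so the 4-byte operand straddles it. */
    volatile uint32_t *pu32 = (volatile uint32_t *)&s_abBuf[62];
    /* Emits a LOCK'ed RMW crossing the cache line -> split lock -> #AC. */
    __atomic_fetch_add(pu32, 1, __ATOMIC_SEQ_CST);
}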
7221
7222
7223/**
7224 * VM-exit exception handler for \#DB (Debug exception).
7225 *
7226 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7227 */
7228static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7229{
7230 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7231 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7232
7233 /*
7234 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7235 */
7236 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7237
7238 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7239 uint64_t const uDR6 = X86_DR6_INIT_VAL
7240 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7241 | X86_DR6_BD | X86_DR6_BS));
7242 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7243
7244 int rc;
7245 if (!pVmxTransient->fIsNestedGuest)
7246 {
7247 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7248
7249 /*
7250 * Prevents stepping twice over the same instruction when the guest is stepping using
7251 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7252 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7253 */
7254 if ( rc == VINF_EM_DBG_STEPPED
7255 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7256 {
7257 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7258 rc = VINF_EM_RAW_GUEST_TRAP;
7259 }
7260 }
7261 else
7262 rc = VINF_EM_RAW_GUEST_TRAP;
7263 Log6Func(("rc=%Rrc\n", rc));
7264 if (rc == VINF_EM_RAW_GUEST_TRAP)
7265 {
7266 /*
7267 * The exception was for the guest. Update DR6, DR7.GD and
7268 * IA32_DEBUGCTL.LBR before forwarding it.
7269 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7270 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7271 */
7272#ifndef IN_NEM_DARWIN
7273 VMMRZCallRing3Disable(pVCpu);
7274 HM_DISABLE_PREEMPT(pVCpu);
7275
7276 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7277 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7278 if (CPUMIsGuestDebugStateActive(pVCpu))
7279 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7280
7281 HM_RESTORE_PREEMPT();
7282 VMMRZCallRing3Enable(pVCpu);
7283#else
7284 /** @todo */
7285#endif
7286
7287 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7288 AssertRCReturn(rc, rc);
7289
7290 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7291 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7292
7293 /* Paranoia. */
7294 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7295 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7296
7297 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7298 AssertRC(rc);
7299
7300 /*
7301 * Raise #DB in the guest.
7302 *
7303 * It is important to reflect exactly what the VM-exit gave us (preserving the
7304 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7305 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7306 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7307 *
7308          * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as
7309          * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7310 */
7311 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7312 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7313 return VINF_SUCCESS;
7314 }
7315
7316 /*
7317 * Not a guest trap, must be a hypervisor related debug event then.
7318 * Update DR6 in case someone is interested in it.
7319 */
7320 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7321 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7322 CPUMSetHyperDR6(pVCpu, uDR6);
7323
7324 return rc;
7325}
7326
7327
7328/**
7329 * Hacks its way around the lovely mesa driver's backdoor accesses.
7330 *
7331 * @sa hmR0SvmHandleMesaDrvGp.
7332 */
7333static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7334{
7335 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7336 RT_NOREF(pCtx);
7337
7338 /* For now we'll just skip the instruction. */
7339 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7340}
7341
7342
7343/**
7344 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7345 * backdoor logging w/o checking what it is running inside.
7346 *
7347 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7348 * backdoor port and magic numbers loaded in registers.
7349 *
7350 * @returns true if it is, false if it isn't.
7351 * @sa hmR0SvmIsMesaDrvGp.
7352 */
7353DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7354{
7355 /* 0xed: IN eAX,dx */
7356 uint8_t abInstr[1];
7357 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7358 return false;
7359
7360 /* Check that it is #GP(0). */
7361 if (pVmxTransient->uExitIntErrorCode != 0)
7362 return false;
7363
7364 /* Check magic and port. */
7365 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7366 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7367 if (pCtx->rax != UINT32_C(0x564d5868))
7368 return false;
7369 if (pCtx->dx != UINT32_C(0x5658))
7370 return false;
7371
7372 /* Flat ring-3 CS. */
7373 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7374 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7375 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7376 if (pCtx->cs.Attr.n.u2Dpl != 3)
7377 return false;
7378 if (pCtx->cs.u64Base != 0)
7379 return false;
7380
7381 /* Check opcode. */
7382 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7383 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7384 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7385 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7386 if (RT_FAILURE(rc))
7387 return false;
7388 if (abInstr[0] != 0xed)
7389 return false;
7390
7391 return true;
7392}
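
/*
 * Illustrative sketch, not part of this file: the ring-3 access pattern that
 * vmxHCIsMesaDrvGp() above recognizes. The driver loads the magic 0x564d5868
 * ('VMXh') into EAX and the backdoor port 0x5658 into DX, then executes
 * "IN EAX,DX" (opcode 0xED); with I/O access denied in ring 3 this raises
 * #GP(0), which is what gets handled here instead of being reflected into the
 * guest. Assumes GCC/Clang extended inline assembly on x86; the helper name
 * and the ECX "command" parameter are illustrative assumptions, not the
 * driver's actual API.
 */
#include <stdint.h>

static inline uint32_t sketchMesaBackdoorIn(uint32_t uCmd)
{
    uint32_t uEax = UINT32_C(0x564d5868);   /* backdoor magic */
    uint32_t uEcx = uCmd;                   /* backdoor command number */
    uint16_t const uPort = 0x5658;          /* backdoor port */
    __asm__ __volatile__("in %%dx, %%eax"
                         : "+a" (uEax), "+c" (uEcx)
                         : "d" (uPort)
                         : "memory");
    return uEax;
}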
7393
7394
7395/**
7396 * VM-exit exception handler for \#GP (General-protection exception).
7397 *
7398 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7399 */
7400static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7401{
7402 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7403 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7404
7405 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7406 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7407#ifndef IN_NEM_DARWIN
7408 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7409 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7410 { /* likely */ }
7411 else
7412#endif
7413 {
7414#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7415# ifndef IN_NEM_DARWIN
7416 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7417# else
7418 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7419# endif
7420#endif
7421 /*
7422 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7423 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7424 */
7425 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7426 AssertRCReturn(rc, rc);
7427 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7428 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7429
7430 if ( pVmxTransient->fIsNestedGuest
7431 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7432 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7433 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7434 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7435 else
7436 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7437 return rc;
7438 }
7439
7440#ifndef IN_NEM_DARWIN
7441 Assert(CPUMIsGuestInRealModeEx(pCtx));
7442 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7443 Assert(!pVmxTransient->fIsNestedGuest);
7444
7445 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7446 AssertRCReturn(rc, rc);
7447
7448 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7449 if (rcStrict == VINF_SUCCESS)
7450 {
7451 if (!CPUMIsGuestInRealModeEx(pCtx))
7452 {
7453 /*
7454 * The guest is no longer in real-mode, check if we can continue executing the
7455 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7456 */
7457 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7458 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7459 {
7460 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7462 }
7463 else
7464 {
7465 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7466 rcStrict = VINF_EM_RESCHEDULE;
7467 }
7468 }
7469 else
7470 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7471 }
7472 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7473 {
7474 rcStrict = VINF_SUCCESS;
7475 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7476 }
7477 return VBOXSTRICTRC_VAL(rcStrict);
7478#endif
7479}
7480
7481
7482/**
7483 * VM-exit exception handler for \#DE (Divide Error).
7484 *
7485 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7486 */
7487static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7488{
7489 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7490 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7491
7492 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7493 AssertRCReturn(rc, rc);
7494
7495 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7496 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7497 {
7498 uint8_t cbInstr = 0;
7499 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7500 if (rc2 == VINF_SUCCESS)
7501 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7502 else if (rc2 == VERR_NOT_FOUND)
7503 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7504 else
7505 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7506 }
7507 else
7508 rcStrict = VINF_SUCCESS; /* Do nothing. */
7509
7510 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7511 if (RT_FAILURE(rcStrict))
7512 {
7513 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7514 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7515 rcStrict = VINF_SUCCESS;
7516 }
7517
7518 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7519 return VBOXSTRICTRC_VAL(rcStrict);
7520}
7521
7522
7523/**
7524 * VM-exit exception handler wrapper for all other exceptions that are not handled
7525 * by a specific handler.
7526 *
7527 * This simply re-injects the exception back into the VM without any special
7528 * processing.
7529 *
7530 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7531 */
7532static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7533{
7534 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7535
7536#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7537# ifndef IN_NEM_DARWIN
7538 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7539 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7540 ("uVector=%#x u32XcptBitmap=%#X32\n",
7541 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7542 NOREF(pVmcsInfo);
7543# endif
7544#endif
7545
7546 /*
7547 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7548 * would have been handled while checking exits due to event delivery.
7549 */
7550 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7551
7552#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7553 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7554 AssertRCReturn(rc, rc);
7555 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7556#endif
7557
7558#ifdef VBOX_WITH_STATISTICS
7559 switch (uVector)
7560 {
7561 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7562 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7563 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7564 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7565 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7566 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7567 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7568 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7569 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7570 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7571 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7572 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7573 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7574 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7575 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7576 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7577 default:
7578 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7579 break;
7580 }
7581#endif
7582
7583    /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
7584 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7585 NOREF(uVector);
7586
7587 /* Re-inject the original exception into the guest. */
7588 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7589 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7590 return VINF_SUCCESS;
7591}
7592
7593
7594/**
7595 * VM-exit exception handler for all exceptions (except NMIs!).
7596 *
7597 * @remarks This may be called for both guests and nested-guests. Take care to not
7598 * make assumptions and avoid doing anything that is not relevant when
7599 * executing a nested-guest (e.g., Mesa driver hacks).
7600 */
7601static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7602{
7603 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7604
7605 /*
7606 * If this VM-exit occurred while delivering an event through the guest IDT, take
7607 * action based on the return code and additional hints (e.g. for page-faults)
7608 * that will be updated in the VMX transient structure.
7609 */
7610 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7611 if (rcStrict == VINF_SUCCESS)
7612 {
7613 /*
7614 * If an exception caused a VM-exit due to delivery of an event, the original
7615 * event may have to be re-injected into the guest. We shall reinject it and
7616 * continue guest execution. However, page-fault is a complicated case and
7617 * needs additional processing done in vmxHCExitXcptPF().
7618 */
7619 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7620 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7621 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7622 || uVector == X86_XCPT_PF)
7623 {
7624 switch (uVector)
7625 {
7626 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7627 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7628 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7629 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7630 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7631 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7632 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7633 default:
7634 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7635 }
7636 }
7637 /* else: inject pending event before resuming guest execution. */
7638 }
7639 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7640 {
7641 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7642 rcStrict = VINF_SUCCESS;
7643 }
7644
7645 return rcStrict;
7646}
7647/** @} */
7648
7649
7650/** @name VM-exit handlers.
7651 * @{
7652 */
7653/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7654/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7655/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7656
7657/**
7658 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7659 */
7660HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7661{
7662 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7663 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7664
7665#ifndef IN_NEM_DARWIN
7666 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7667 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7668 return VINF_SUCCESS;
7669 return VINF_EM_RAW_INTERRUPT;
7670#else
7671 return VINF_SUCCESS;
7672#endif
7673}
7674
7675
7676/**
7677 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7678 * VM-exit.
7679 */
7680HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7681{
7682 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7683 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7684
7685 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7686
7687 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7688 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7689 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7690
7691 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7692 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7693 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7694 NOREF(pVmcsInfo);
7695
7696 VBOXSTRICTRC rcStrict;
7697 switch (uExitIntType)
7698 {
7699#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7700 /*
7701 * Host physical NMIs:
7702 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7703 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7704 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7705 *
7706 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7707 * See Intel spec. 27.5.5 "Updating Non-Register State".
7708 */
7709 case VMX_EXIT_INT_INFO_TYPE_NMI:
7710 {
7711 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7712 break;
7713 }
7714#endif
7715
7716 /*
7717 * Privileged software exceptions (#DB from ICEBP),
7718 * Software exceptions (#BP and #OF),
7719 * Hardware exceptions:
7720 * Process the required exceptions and resume guest execution if possible.
7721 */
7722 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7723 Assert(uVector == X86_XCPT_DB);
7724 RT_FALL_THRU();
7725 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7726 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7727 RT_FALL_THRU();
7728 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7729 {
7730 NOREF(uVector);
7731 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7732 | HMVMX_READ_EXIT_INSTR_LEN
7733 | HMVMX_READ_IDT_VECTORING_INFO
7734 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7735 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7736 break;
7737 }
7738
7739 default:
7740 {
7741 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7742 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7743 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7744 break;
7745 }
7746 }
7747
7748 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7749 return rcStrict;
7750}
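
/*
 * Illustrative sketch, not part of this file: the layout of the VM-exit
 * interruption-information field that vmxHCExitXcptOrNmi() above picks apart
 * with the VMX_EXIT_INT_INFO_* macros (Intel SDM, "VM-Exit Interruption
 * Information"): bits 7:0 vector, bits 10:8 type (0 = external interrupt,
 * 2 = NMI, 3 = hardware exception, 4 = software interrupt, 5 = privileged
 * software exception, 6 = software exception), bit 11 error-code valid,
 * bit 31 valid. The structure and function names are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct SKETCHEXITINTINFO
{
    uint8_t uVector;
    uint8_t uType;
    bool    fErrCodeValid;
    bool    fValid;
} SKETCHEXITINTINFO;

static SKETCHEXITINTINFO sketchDecodeExitIntInfo(uint32_t uExitIntInfo)
{
    SKETCHEXITINTINFO Decoded;
    Decoded.uVector       = (uint8_t)( uExitIntInfo       & 0xff);
    Decoded.uType         = (uint8_t)((uExitIntInfo >> 8) & 0x7);
    Decoded.fErrCodeValid = (uExitIntInfo & UINT32_C(0x00000800)) != 0;  /* bit 11 */
    Decoded.fValid        = (uExitIntInfo & UINT32_C(0x80000000)) != 0;  /* bit 31 */
    return Decoded;
}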
7751
7752
7753/**
7754 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7755 */
7756HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7757{
7758 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7759
7760    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7761 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7762 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7763
7764 /* Evaluate and deliver pending events and resume guest execution. */
7765 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7766 return VINF_SUCCESS;
7767}
7768
7769
7770/**
7771 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7772 */
7773HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7774{
7775 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7776
7777 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7778 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7779 {
7780 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7781 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7782 }
7783
7784 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7785
7786 /*
7787 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7788 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7789 */
7790 uint32_t fIntrState;
7791 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7792 AssertRC(rc);
7793 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7794 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7795 {
7796 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7797
7798 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7799 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7800 AssertRC(rc);
7801 }
7802
7803    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7804 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7805
7806 /* Evaluate and deliver pending events and resume guest execution. */
7807 return VINF_SUCCESS;
7808}
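
/*
 * Illustrative sketch, not part of this file: the guest interruptibility-state
 * field read and rewritten in vmxHCExitNmiWindow() above is a small bit field
 * (Intel SDM, "Guest Non-Register State"): bit 0 blocking by STI, bit 1
 * blocking by MOV SS, bit 2 blocking by SMI, bit 3 blocking by NMI. Clearing
 * bit 0 is what "unblocking STI" amounts to. Helper names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool     sketchIntStateBlockedBySti(uint32_t fIntrState)     { return (fIntrState & UINT32_C(0x1)) != 0; }
static inline bool     sketchIntStateBlockedByMovSs(uint32_t fIntrState)   { return (fIntrState & UINT32_C(0x2)) != 0; }
static inline uint32_t sketchIntStateClearStiBlocking(uint32_t fIntrState) { return fIntrState & ~UINT32_C(0x1); }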
7809
7810
7811/**
7812 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7813 */
7814HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7815{
7816 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7817 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7818}
7819
7820
7821/**
7822 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7823 */
7824HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7825{
7826 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7827 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7828}
7829
7830
7831/**
7832 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7833 */
7834HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7835{
7836 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7837
7838 /*
7839 * Get the state we need and update the exit history entry.
7840 */
7841 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7842 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7843 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7844 AssertRCReturn(rc, rc);
7845
7846 VBOXSTRICTRC rcStrict;
7847 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7848 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7849 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7850 if (!pExitRec)
7851 {
7852 /*
7853 * Regular CPUID instruction execution.
7854 */
7855 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7856 if (rcStrict == VINF_SUCCESS)
7857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7858 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7859 {
7860 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7861 rcStrict = VINF_SUCCESS;
7862 }
7863 }
7864 else
7865 {
7866 /*
7867 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7868 */
7869 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7870 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7871 AssertRCReturn(rc2, rc2);
7872
7873 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7874 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7875
7876 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7877 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7878
7879 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7880 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7881 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7882 }
7883 return rcStrict;
7884}
7885
7886
7887/**
7888 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7889 */
7890HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7891{
7892 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7893
7894 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7895 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7896 AssertRCReturn(rc, rc);
7897
7898 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7899 return VINF_EM_RAW_EMULATE_INSTR;
7900
7901 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7902 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7903}
7904
7905
7906/**
7907 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7908 */
7909HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7910{
7911 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7912
7913 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7914 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7915 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7916 AssertRCReturn(rc, rc);
7917
7918 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7919 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7920 {
7921 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7922 we must reset offsetting on VM-entry. See @bugref{6634}. */
7923 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7924 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7925 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7926 }
7927 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7928 {
7929 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7930 rcStrict = VINF_SUCCESS;
7931 }
7932 return rcStrict;
7933}
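
/*
 * Illustrative sketch, not part of this file: with "use TSC offsetting" active
 * and RDTSC exiting clear, the value the guest observes from RDTSC/RDTSCP is
 * simply the host TSC plus the signed TSC offset from the VMCS (Intel SDM,
 * "Time-Stamp Counter Offset and Multiplier"). That relationship is why a
 * spurious exit here forces the offset to be recomputed before the next
 * VM-entry. The helper name is made up for illustration.
 */
#include <stdint.h>

static inline uint64_t sketchGuestTscFromHostTsc(uint64_t uHostTsc, int64_t iTscOffset)
{
    /* Addition is modulo 2^64, matching the hardware behaviour. */
    return uHostTsc + (uint64_t)iTscOffset;
}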
7934
7935
7936/**
7937 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7938 */
7939HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7940{
7941 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7942
7943 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7944 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7945 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7946 AssertRCReturn(rc, rc);
7947
7948 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7949 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7950 {
7951 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7952 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7953 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7954 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7955 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7956 }
7957 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7958 {
7959 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7960 rcStrict = VINF_SUCCESS;
7961 }
7962 return rcStrict;
7963}
7964
7965
7966/**
7967 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7968 */
7969HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7970{
7971 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7972
7973 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7974 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7975 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7976 AssertRCReturn(rc, rc);
7977
7978 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7979 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7980 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7981 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7982 {
7983 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7984 rcStrict = VINF_SUCCESS;
7985 }
7986 return rcStrict;
7987}
7988
7989
7990/**
7991 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7992 */
7993HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7994{
7995 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7996
7997 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7998 if (EMAreHypercallInstructionsEnabled(pVCpu))
7999 {
8000 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8001 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8002 | CPUMCTX_EXTRN_RFLAGS
8003 | CPUMCTX_EXTRN_CR0
8004 | CPUMCTX_EXTRN_SS
8005 | CPUMCTX_EXTRN_CS
8006 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8007 AssertRCReturn(rc, rc);
8008
8009 /* Perform the hypercall. */
8010 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8011 if (rcStrict == VINF_SUCCESS)
8012 {
8013 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8014 AssertRCReturn(rc, rc);
8015 }
8016 else
8017 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8018 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8019 || RT_FAILURE(rcStrict));
8020
8021         /* If the hypercall changes anything other than the guest's general-purpose registers,
8022            we would need to reload the changed guest bits here before VM-entry. */
8023 }
8024 else
8025 Log4Func(("Hypercalls not enabled\n"));
8026
8027 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8028 if (RT_FAILURE(rcStrict))
8029 {
8030 vmxHCSetPendingXcptUD(pVCpu);
8031 rcStrict = VINF_SUCCESS;
8032 }
8033
8034 return rcStrict;
8035}
8036
8037
8038/**
8039 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8040 */
8041HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8042{
8043 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8044#ifndef IN_NEM_DARWIN
8045 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8046#endif
8047
8048 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8049 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8050 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8051 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8052 AssertRCReturn(rc, rc);
8053
8054 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8055
8056 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8057 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8058 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8059 {
8060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8061 rcStrict = VINF_SUCCESS;
8062 }
8063 else
8064 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8065 VBOXSTRICTRC_VAL(rcStrict)));
8066 return rcStrict;
8067}
8068
8069
8070/**
8071 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8072 */
8073HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8074{
8075 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8076
8077 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8078 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8079 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8080 AssertRCReturn(rc, rc);
8081
8082 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8083 if (rcStrict == VINF_SUCCESS)
8084 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8085 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8086 {
8087 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8088 rcStrict = VINF_SUCCESS;
8089 }
8090
8091 return rcStrict;
8092}
8093
8094
8095/**
8096 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8097 */
8098HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8099{
8100 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8101
8102 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8103 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8104 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8105 AssertRCReturn(rc, rc);
8106
8107 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8108 if (RT_SUCCESS(rcStrict))
8109 {
8110 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8111 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8112 rcStrict = VINF_SUCCESS;
8113 }
8114
8115 return rcStrict;
8116}
8117
8118
8119/**
8120 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8121 * VM-exit.
8122 */
8123HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8124{
8125 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8126 return VINF_EM_RESET;
8127}
8128
8129
8130/**
8131 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8132 */
8133HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8134{
8135 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8136
8137 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8138 AssertRCReturn(rc, rc);
8139
8140 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8141 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8142 rc = VINF_SUCCESS;
8143 else
8144 rc = VINF_EM_HALT;
8145
8146 if (rc != VINF_SUCCESS)
8147 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8148 return rc;
8149}
8150
8151
8152#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8153/**
8154 * VM-exit handler for instructions that result in a \#UD exception delivered to
8155 * the guest.
8156 */
8157HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8158{
8159 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8160 vmxHCSetPendingXcptUD(pVCpu);
8161 return VINF_SUCCESS;
8162}
8163#endif
8164
8165
8166/**
8167 * VM-exit handler for expiry of the VMX-preemption timer.
8168 */
8169HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8170{
8171 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8172
8173 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8174 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8175    Log12(("vmxHCExitPreemptTimer:\n"));
8176
8177 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8178 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8179 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8180 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8181 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8182}
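
/*
 * Illustrative sketch, not part of this file: the VMX-preemption timer counts
 * down at the TSC rate shifted right by IA32_VMX_MISC[4:0] (Intel SDM,
 * "VMX-Preemption Timer"). Converting a TSC-based deadline into the 32-bit
 * timer value written to the VMCS therefore looks roughly like this; cShift
 * would come from reading the IA32_VMX_MISC MSR. The helper name is made up.
 */
#include <stdint.h>

static inline uint32_t sketchPreemptTimerFromTscDelta(uint64_t cTscTicks, uint8_t cShift)
{
    uint64_t const cTimerTicks = cTscTicks >> cShift;
    /* The VMCS field is only 32 bits wide; saturate rather than wrap. */
    return cTimerTicks < UINT32_MAX ? (uint32_t)cTimerTicks : UINT32_MAX;
}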
8183
8184
8185/**
8186 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8187 */
8188HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8189{
8190 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8191
8192 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8193 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8194 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8195 AssertRCReturn(rc, rc);
8196
8197 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8199 : HM_CHANGED_RAISED_XCPT_MASK);
8200
8201#ifndef IN_NEM_DARWIN
8202 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8203 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8204 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8205 {
8206 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8207 hmR0VmxUpdateStartVmFunction(pVCpu);
8208 }
8209#endif
8210    /** @todo Enable the new code after finding a reliable guest test-case. */
8211 return rcStrict;
8212}
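
/*
 * Illustrative sketch, not part of this file: the decision taken above after an
 * XSETBV exit. XCR0 is not switched by VMX hardware, so the VMM only needs to
 * swap it manually around VM-entry/VM-exit when the guest has CR4.OSXSAVE set
 * and its XCR0 differs from the host value. Assumes a compiler providing the
 * _xgetbv() XSAVE intrinsic (GCC/Clang/MSVC with XSAVE support enabled); the
 * helper and macro names are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <immintrin.h>

#define SKETCH_X86_CR4_OSXSAVE_BIT  18  /* CR4.OSXSAVE is bit 18. */

static inline bool sketchNeedManualXcr0Switch(uint64_t uGuestCr4, uint64_t uGuestXcr0)
{
    return (uGuestCr4 & (UINT64_C(1) << SKETCH_X86_CR4_OSXSAVE_BIT))
        && uGuestXcr0 != _xgetbv(0 /* XCR0 */);
}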
8213
8214
8215/**
8216 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8217 */
8218HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8219{
8220 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8221
8222 /** @todo Enable the new code after finding a reliably guest test-case. */
8223#if 1
8224 return VERR_EM_INTERPRETER;
8225#else
8226 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8227 | HMVMX_READ_EXIT_INSTR_INFO
8228 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8229 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8230 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8231 AssertRCReturn(rc, rc);
8232
8233 /* Paranoia. Ensure this has a memory operand. */
8234 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8235
8236 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8237 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8238 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8239 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8240
8241 RTGCPTR GCPtrDesc;
8242 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8243
8244 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8245 GCPtrDesc, uType);
8246 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8247 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8248 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8249 {
8250 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8251 rcStrict = VINF_SUCCESS;
8252 }
8253 return rcStrict;
8254#endif
8255}
8256
8257
8258/**
8259 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8260 * VM-exit.
8261 */
8262HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8263{
8264 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8265 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8266 AssertRCReturn(rc, rc);
8267
8268 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8269 if (RT_FAILURE(rc))
8270 return rc;
8271
8272 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8273 NOREF(uInvalidReason);
8274
8275#ifdef VBOX_STRICT
8276 uint32_t fIntrState;
8277 uint64_t u64Val;
8278 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8279 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8280 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8281
8282 Log4(("uInvalidReason %u\n", uInvalidReason));
8283 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8284 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8285 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8286
8287 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8288 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8289 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8290 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8291 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8292 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8293 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8294    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8295 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8296 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8297 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8298 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8299# ifndef IN_NEM_DARWIN
8300 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8301 {
8302 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8303 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8304 }
8305
8306 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8307# endif
8308#endif
8309
8310 return VERR_VMX_INVALID_GUEST_STATE;
8311}
8312
8313/**
8314 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8315 */
8316HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8317{
8318 /*
8319 * Cumulative notes of all recognized but unexpected VM-exits.
8320 *
8321 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8322 * nested-paging is used.
8323 *
8324 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8325 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8326 * this function (and thereby stopping VM execution) for handling such instructions.
8327 *
8328 *
8329 * VMX_EXIT_INIT_SIGNAL:
8330 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8331 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8332 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8333 *
8334 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8335 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8336 * See Intel spec. "23.8 Restrictions on VMX operation".
8337 *
8338 * VMX_EXIT_SIPI:
8339 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8340 * activity state is used. We don't make use of it as our guests don't have direct
8341 * access to the host local APIC.
8342 *
8343 * See Intel spec. 25.3 "Other Causes of VM-exits".
8344 *
8345 * VMX_EXIT_IO_SMI:
8346 * VMX_EXIT_SMI:
8347 * This can only happen if we support dual-monitor treatment of SMI, which can be
8348 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8349 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8350 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8351 *
8352 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8353 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8354 *
8355 * VMX_EXIT_ERR_MSR_LOAD:
8356 * Failures while loading MSRs that are part of the VM-entry MSR-load area are
8357 * unexpected and typically indicate a bug in the hypervisor code. We thus cannot
8358 * resume execution.
8359 *
8360 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8361 *
8362 * VMX_EXIT_ERR_MACHINE_CHECK:
8363 * A machine-check exception indicates a fatal/unrecoverable hardware condition,
8364 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
8365 * abort-class #MC exception is raised. We thus cannot assume a reasonable chance
8366 * of continuing any sort of execution and we bail.
8367 *
8368 * See Intel spec. 15.1 "Machine-check Architecture".
8369 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8370 *
8371 * VMX_EXIT_PML_FULL:
8372 * VMX_EXIT_VIRTUALIZED_EOI:
8373 * VMX_EXIT_APIC_WRITE:
8374 * We do not currently support any of these features and thus they are all unexpected
8375 * VM-exits.
8376 *
8377 * VMX_EXIT_GDTR_IDTR_ACCESS:
8378 * VMX_EXIT_LDTR_TR_ACCESS:
8379 * VMX_EXIT_RDRAND:
8380 * VMX_EXIT_RSM:
8381 * VMX_EXIT_VMFUNC:
8382 * VMX_EXIT_ENCLS:
8383 * VMX_EXIT_RDSEED:
8384 * VMX_EXIT_XSAVES:
8385 * VMX_EXIT_XRSTORS:
8386 * VMX_EXIT_UMWAIT:
8387 * VMX_EXIT_TPAUSE:
8388 * VMX_EXIT_LOADIWKEY:
8389 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8390 * instruction. Any VM-exit for these instructions indicates a hardware problem,
8391 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8392 *
8393 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8394 */
8395 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8396 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8397 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8398}
8399
8400
8401/**
8402 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8403 */
8404HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8405{
8406 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8407
8408 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8409
8410 /** @todo Optimize this: We currently drag in the whole MSR state
8411 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8412 * MSRs required. That would require changes to IEM and possibly CPUM too.
8413 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8414 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8415 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8416 int rc;
8417 switch (idMsr)
8418 {
8419 default:
8420 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8421 __FUNCTION__);
8422 AssertRCReturn(rc, rc);
8423 break;
8424 case MSR_K8_FS_BASE:
8425 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8426 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8427 AssertRCReturn(rc, rc);
8428 break;
8429 case MSR_K8_GS_BASE:
8430 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8431 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8432 AssertRCReturn(rc, rc);
8433 break;
8434 }
8435
8436 Log4Func(("ecx=%#RX32\n", idMsr));
8437
8438#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8439 Assert(!pVmxTransient->fIsNestedGuest);
8440 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8441 {
8442 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8443 && idMsr != MSR_K6_EFER)
8444 {
8445 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8446 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8447 }
8448 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8449 {
8450 Assert(pVmcsInfo->pvMsrBitmap);
8451 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8452 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8453 {
8454 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8455 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8456 }
8457 }
8458 }
8459#endif
8460
8461 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8462 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8463 if (rcStrict == VINF_SUCCESS)
8464 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8465 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8466 {
8467 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8468 rcStrict = VINF_SUCCESS;
8469 }
8470 else
8471 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8472 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8473
8474 return rcStrict;
8475}
8476
8477
8478/**
8479 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8480 */
8481HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8482{
8483 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8484
8485 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8486
8487 /*
8488 * The FS and GS base MSRs are not part of the CPUMCTX_EXTRN_ALL_MSRS mask.
8489 * Although we don't need to fetch the base itself (it will be overwritten shortly),
8490 * loading the guest state later would also load the entire segment register, including
8491 * the limit and attributes, and thus we need to import them here.
8492 */
8493 /** @todo Optimize this: We currently drag in the whole MSR state
8494 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to fetch only the
8495 * MSRs required. That would require changes to IEM and possibly CPUM too.
8496 * (Should probably be done in a lazy fashion from CPUMAllMsrs.cpp.) */
8497 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8498 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8499 int rc;
8500 switch (idMsr)
8501 {
8502 default:
8503 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8504 __FUNCTION__);
8505 AssertRCReturn(rc, rc);
8506 break;
8507
8508 case MSR_K8_FS_BASE:
8509 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8510 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8511 AssertRCReturn(rc, rc);
8512 break;
8513 case MSR_K8_GS_BASE:
8514 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8515 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8516 AssertRCReturn(rc, rc);
8517 break;
8518 }
8519 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8520
8521 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8522 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8523
8524 if (rcStrict == VINF_SUCCESS)
8525 {
8526 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8527
8528 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8529 if ( idMsr == MSR_IA32_APICBASE
8530 || ( idMsr >= MSR_IA32_X2APIC_START
8531 && idMsr <= MSR_IA32_X2APIC_END))
8532 {
8533 /*
8534 * We've already saved the APIC-related guest-state (TPR) in the post-run phase.
8535 * When full APIC register virtualization is implemented, we'll have to make
8536 * sure the APIC state is saved from the VMCS before IEM changes it.
8537 */
8538 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8539 }
8540 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8541 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8542 else if (idMsr == MSR_K6_EFER)
8543 {
8544 /*
8545 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8546 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8547 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8548 */
8549 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8550 }
8551
8552 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8553 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8554 {
8555 switch (idMsr)
8556 {
8557 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8558 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8559 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8560 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8561 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8562 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8563 default:
8564 {
8565#ifndef IN_NEM_DARWIN
8566 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8567 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8568 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8569 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8570#else
8571 AssertMsgFailed(("TODO\n"));
8572#endif
8573 break;
8574 }
8575 }
8576 }
8577#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8578 else
8579 {
8580 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8581 switch (idMsr)
8582 {
8583 case MSR_IA32_SYSENTER_CS:
8584 case MSR_IA32_SYSENTER_EIP:
8585 case MSR_IA32_SYSENTER_ESP:
8586 case MSR_K8_FS_BASE:
8587 case MSR_K8_GS_BASE:
8588 {
8589 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8590 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8591 }
8592
8593 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8594 default:
8595 {
8596 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8597 {
8598 /* EFER MSR writes are always intercepted. */
8599 if (idMsr != MSR_K6_EFER)
8600 {
8601 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8602 idMsr));
8603 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8604 }
8605 }
8606
8607 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8608 {
8609 Assert(pVmcsInfo->pvMsrBitmap);
8610 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8611 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8612 {
8613 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8614 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8615 }
8616 }
8617 break;
8618 }
8619 }
8620 }
8621#endif /* VBOX_STRICT */
8622 }
8623 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8624 {
8625 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8626 rcStrict = VINF_SUCCESS;
8627 }
8628 else
8629 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8630 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8631
8632 return rcStrict;
8633}
8634
8635
8636/**
8637 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8638 */
8639HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8640{
8641 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8642
8643 /** @todo The guest has likely hit a contended spinlock. We might want to
8644 * poke or schedule a different guest VCPU. */
8645 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8646 if (RT_SUCCESS(rc))
8647 return VINF_EM_RAW_INTERRUPT;
8648
8649 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8650 return rc;
8651}
8652
8653
8654/**
8655 * VM-exit handler for when the TPR value is lowered below the specified
8656 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8657 */
8658HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8659{
8660 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8661 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8662
8663 /*
8664 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8665 * We'll re-evaluate pending interrupts and inject them before the next VM
8666 * entry so we can just continue execution here.
8667 */
8668 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8669 return VINF_SUCCESS;
8670}
8671
8672
8673/**
8674 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8675 * VM-exit.
8676 *
8677 * @retval VINF_SUCCESS when guest execution can continue.
8678 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8679 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8680 * incompatible guest state for VMX execution (real-on-v86 case).
8681 */
8682HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8683{
8684 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8685 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8686
8687 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8688 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8689 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8690
8691 VBOXSTRICTRC rcStrict;
8692 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8693 uint64_t const uExitQual = pVmxTransient->uExitQual;
8694 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8695 switch (uAccessType)
8696 {
8697 /*
8698 * MOV to CRx.
8699 */
8700 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8701 {
8702 /*
8703 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8704 * changes certain bits even in CR0 or CR4 (and not just CR3). We are currently fine
8705 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8706 * PAE PDPTEs as well.
8707 */
8708 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8709 AssertRCReturn(rc, rc);
8710
8711 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8712#ifndef IN_NEM_DARWIN
8713 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8714#endif
8715 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8716 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8717
8718 /*
8719 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8720 * - When nested paging isn't used.
8721 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8722 * - We are executing in the VM debug loop.
8723 */
8724#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8725# ifndef IN_NEM_DARWIN
8726 Assert( iCrReg != 3
8727 || !VM_IS_VMX_NESTED_PAGING(pVM)
8728 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8729 || pVCpu->hmr0.s.fUsingDebugLoop);
8730# else
8731 Assert( iCrReg != 3
8732 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8733# endif
8734#endif
8735
8736 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8737 Assert( iCrReg != 8
8738 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8739
8740 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8741 AssertMsg( rcStrict == VINF_SUCCESS
8742 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8743
8744#ifndef IN_NEM_DARWIN
8745 /*
8746 * This is a kludge for handling switches back to real mode when we try to use
8747 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8748 * deal with special selector values, so we have to return to ring-3 and run
8749 * there till the selector values are V86 mode compatible.
8750 *
8751 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8752 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8753 * this function.
8754 */
8755 if ( iCrReg == 0
8756 && rcStrict == VINF_SUCCESS
8757 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8758 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8759 && (uOldCr0 & X86_CR0_PE)
8760 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8761 {
8762 /** @todo Check selectors rather than returning all the time. */
8763 Assert(!pVmxTransient->fIsNestedGuest);
8764 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8765 rcStrict = VINF_EM_RESCHEDULE_REM;
8766 }
8767#endif
8768
8769 break;
8770 }
8771
8772 /*
8773 * MOV from CRx.
8774 */
8775 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8776 {
8777 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8778 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8779
8780 /*
8781 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8782 * - When nested paging isn't used.
8783 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8784 * - We are executing in the VM debug loop.
8785 */
8786#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8787# ifndef IN_NEM_DARWIN
8788 Assert( iCrReg != 3
8789 || !VM_IS_VMX_NESTED_PAGING(pVM)
8790 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8791 || pVCpu->hmr0.s.fLeaveDone);
8792# else
8793 Assert( iCrReg != 3
8794 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8795# endif
8796#endif
8797
8798 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8799 Assert( iCrReg != 8
8800 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8801
8802 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8803 break;
8804 }
8805
8806 /*
8807 * CLTS (Clear Task-Switch Flag in CR0).
8808 */
8809 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8810 {
8811 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8812 break;
8813 }
8814
8815 /*
8816 * LMSW (Load Machine-Status Word into CR0).
8817 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8818 */
8819 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8820 {
8821 RTGCPTR GCPtrEffDst;
8822 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8823 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8824 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8825 if (fMemOperand)
8826 {
8827 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8828 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8829 }
8830 else
8831 GCPtrEffDst = NIL_RTGCPTR;
8832 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8833 break;
8834 }
8835
8836 default:
8837 {
8838 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8839 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8840 }
8841 }
8842
8843 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8844 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8845 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8846
8847 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8848 NOREF(pVM);
8849 return rcStrict;
8850}
8851
8852
8853/**
8854 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8855 * VM-exit.
8856 */
8857HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8858{
8859 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8860 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8861
8862 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8863 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8864 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8865 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8866#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8867 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8868 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8869 AssertRCReturn(rc, rc);
8870
8871 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8872 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8873 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8874 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8875 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8876 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8877 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
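    /* The exit-qualification size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes;
       the value 2 is not a defined encoding, hence the assertion below and the zero entries
       in the size/mask tables further down. */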
8878 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8879
8880 /*
8881 * Update exit history to see if this exit can be optimized.
8882 */
8883 VBOXSTRICTRC rcStrict;
8884 PCEMEXITREC pExitRec = NULL;
8885 if ( !fGstStepping
8886 && !fDbgStepping)
8887 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8888 !fIOString
8889 ? !fIOWrite
8890 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8891 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8892 : !fIOWrite
8893 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8894 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8895 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8896 if (!pExitRec)
8897 {
8898 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8899 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8900
8901 uint32_t const cbValue = s_aIOSizes[uIOSize];
8902 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8903 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8904 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8905 if (fIOString)
8906 {
8907 /*
8908 * INS/OUTS - I/O String instruction.
8909 *
8910 * Use instruction-information if available, otherwise fall back on
8911 * interpreting the instruction.
8912 */
8913 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8914 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8915 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8916 if (fInsOutsInfo)
8917 {
8918 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8919 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8920 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8921 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8922 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8923 if (fIOWrite)
8924 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8925 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8926 else
8927 {
8928 /*
8929 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8930 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8931 * See Intel Instruction spec. for "INS".
8932 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8933 */
8934 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8935 }
8936 }
8937 else
8938 rcStrict = IEMExecOne(pVCpu);
8939
8940 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8941 fUpdateRipAlready = true;
8942 }
8943 else
8944 {
8945 /*
8946 * IN/OUT - I/O instruction.
8947 */
8948 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8949 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8950 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8951 if (fIOWrite)
8952 {
8953 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8954 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8955#ifndef IN_NEM_DARWIN
8956 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8957 && !pCtx->eflags.Bits.u1TF)
8958 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8959#endif
8960 }
8961 else
8962 {
8963 uint32_t u32Result = 0;
8964 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8965 if (IOM_SUCCESS(rcStrict))
8966 {
8967 /* Save result of I/O IN instr. in AL/AX/EAX. */
8968 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8969 }
8970#ifndef IN_NEM_DARWIN
8971 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8972 && !pCtx->eflags.Bits.u1TF)
8973 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8974#endif
8975 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8976 }
8977 }
8978
8979 if (IOM_SUCCESS(rcStrict))
8980 {
8981 if (!fUpdateRipAlready)
8982 {
8983 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8984 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8985 }
8986
8987 /*
8988 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8989 * guru meditation while booting a Fedora 17 64-bit guest.
8990 *
8991 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8992 */
8993 if (fIOString)
8994 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8995
8996 /*
8997 * If any I/O breakpoints are armed, we need to check if one triggered
8998 * and take appropriate action.
8999 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9000 */
9001#if 1
9002 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9003#else
9004 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9005 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9006 AssertRCReturn(rc, rc);
9007#endif
9008
9009 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9010 * execution engines about whether hyper BPs and such are pending. */
9011 uint32_t const uDr7 = pCtx->dr[7];
9012 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9013 && X86_DR7_ANY_RW_IO(uDr7)
9014 && (pCtx->cr4 & X86_CR4_DE))
9015 || DBGFBpIsHwIoArmed(pVM)))
9016 {
9017 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9018
9019#ifndef IN_NEM_DARWIN
9020 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9021 VMMRZCallRing3Disable(pVCpu);
9022 HM_DISABLE_PREEMPT(pVCpu);
9023
9024 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9025
9026 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9027 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9028 {
9029 /* Raise #DB. */
9030 if (fIsGuestDbgActive)
9031 ASMSetDR6(pCtx->dr[6]);
9032 if (pCtx->dr[7] != uDr7)
9033 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9034
9035 vmxHCSetPendingXcptDB(pVCpu);
9036 }
9037 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
9038 however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9039 else if ( rcStrict2 != VINF_SUCCESS
9040 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9041 rcStrict = rcStrict2;
9042 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9043
9044 HM_RESTORE_PREEMPT();
9045 VMMRZCallRing3Enable(pVCpu);
9046#else
9047 /** @todo */
9048#endif
9049 }
9050 }
9051
9052#ifdef VBOX_STRICT
9053 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9054 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9055 Assert(!fIOWrite);
9056 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9057 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9058 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9059 Assert(fIOWrite);
9060 else
9061 {
9062# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9063 * statuses that the VMM device and some others may return. See
9064 * IOM_SUCCESS() for guidance. */
9065 AssertMsg( RT_FAILURE(rcStrict)
9066 || rcStrict == VINF_SUCCESS
9067 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9068 || rcStrict == VINF_EM_DBG_BREAKPOINT
9069 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9070 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9071# endif
9072 }
9073#endif
9074 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9075 }
9076 else
9077 {
9078 /*
9079 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9080 */
9081 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9082 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9083 AssertRCReturn(rc2, rc2);
9084 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9085 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9086 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9087 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9088 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9089 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9090
9091 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9092 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9093
9094 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9095 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9096 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9097 }
9098 return rcStrict;
9099}
9100
9101
9102/**
9103 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9104 * VM-exit.
9105 */
9106HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9107{
9108 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9109
9110 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9111 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9112 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9113 {
9114 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9115 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9116 {
9117 uint32_t uErrCode;
9118 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9119 {
9120 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9121 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9122 }
9123 else
9124 uErrCode = 0;
9125
9126 RTGCUINTPTR GCPtrFaultAddress;
9127 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9128 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9129 else
9130 GCPtrFaultAddress = 0;
9131
9132 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9133
9134 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9135 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9136
9137 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9138 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9139 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9140 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9141 }
9142 }
9143
9144 /* Fall back to the interpreter to emulate the task-switch. */
9145 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9146 return VERR_EM_INTERPRETER;
9147}
9148
9149
9150/**
9151 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9152 */
9153HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9154{
9155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9156
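    /* Disarm the monitor trap flag again and report the completed instruction step
       (VINF_EM_DBG_STEPPED) to the debugger. */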
9157 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9158 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9159 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9160 AssertRC(rc);
9161 return VINF_EM_DBG_STEPPED;
9162}
9163
9164
9165/**
9166 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9167 */
9168HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9169{
9170 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9171 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9172
9173 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9174 | HMVMX_READ_EXIT_INSTR_LEN
9175 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9176 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9177 | HMVMX_READ_IDT_VECTORING_INFO
9178 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9179
9180 /*
9181 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9182 */
9183 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9184 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9185 {
9186 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9187 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9188 {
9189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9190 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9191 }
9192 }
9193 else
9194 {
9195 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9196 return rcStrict;
9197 }
9198
9199 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9200 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9201 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9202 AssertRCReturn(rc, rc);
9203
9204 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9205 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9206 switch (uAccessType)
9207 {
9208#ifndef IN_NEM_DARWIN
9209 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9210 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9211 {
9212 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9213 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9214 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9215
9216 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9217 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9218 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9219 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9220 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9221
9222 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9223 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9224 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9225 if ( rcStrict == VINF_SUCCESS
9226 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9227 || rcStrict == VERR_PAGE_NOT_PRESENT)
9228 {
9229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9230 | HM_CHANGED_GUEST_APIC_TPR);
9231 rcStrict = VINF_SUCCESS;
9232 }
9233 break;
9234 }
9235#else
9236 /** @todo */
9237#endif
9238
9239 default:
9240 {
9241 Log4Func(("uAccessType=%#x\n", uAccessType));
9242 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9243 break;
9244 }
9245 }
9246
9247 if (rcStrict != VINF_SUCCESS)
9248 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9249 return rcStrict;
9250}
9251
9252
9253/**
9254 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9255 * VM-exit.
9256 */
9257HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9258{
9259 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9260 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9261
9262 /*
9263 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9264 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9265 * must emulate the MOV DRx access.
9266 */
9267 if (!pVmxTransient->fIsNestedGuest)
9268 {
9269 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9270 if ( pVmxTransient->fWasGuestDebugStateActive
9271#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9272 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9273#endif
9274 )
9275 {
9276 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9277 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9278 }
9279
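        /* Neither single-stepping nor the hypervisor debug state is active here, so we can
           load the guest debug state and let the guest access DRx directly, optionally
           without further MOV DRx intercepts (see fResumeInstruction below). */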
9280 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9281 && !pVmxTransient->fWasHyperDebugStateActive)
9282 {
9283 Assert(!DBGFIsStepping(pVCpu));
9284 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9285
9286 /* Whether we disable intercepting MOV DRx instructions and resume
9287 the current one, or emulate it and keep intercepting them, is
9288 configurable, though it usually comes down to whether there are
9289 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9290#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9291 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9292#else
9293 bool const fResumeInstruction = true;
9294#endif
9295 if (fResumeInstruction)
9296 {
9297 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9298 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9299 AssertRC(rc);
9300 }
9301
9302#ifndef IN_NEM_DARWIN
9303 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9304 VMMRZCallRing3Disable(pVCpu);
9305 HM_DISABLE_PREEMPT(pVCpu);
9306
9307 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9308 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9309 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9310
9311 HM_RESTORE_PREEMPT();
9312 VMMRZCallRing3Enable(pVCpu);
9313#else
9314 CPUMR3NemActivateGuestDebugState(pVCpu);
9315 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9316 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9317#endif
9318
9319 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9320 if (fResumeInstruction)
9321 {
9322#ifdef VBOX_WITH_STATISTICS
9323 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9324 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9325 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9326 else
9327 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9328#endif
9329 return VINF_SUCCESS;
9330 }
9331 }
9332 }
9333
9334 /*
9335 * Import state. We must have DR7 loaded here as it's always consulted,
9336 * both for reading and writing. The other debug registers are never
9337 * exported as such.
9338 */
9339 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9340 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9341 | CPUMCTX_EXTRN_GPRS_MASK
9342 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9343 AssertRCReturn(rc, rc);
9344
9345 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9346 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9347 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9348 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9349
9350 VBOXSTRICTRC rcStrict;
9351 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9352 {
9353 /*
9354 * Write DRx register.
9355 */
9356 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9357 AssertMsg( rcStrict == VINF_SUCCESS
9358 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9359
9360 if (rcStrict == VINF_SUCCESS)
9361 {
9362 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9363 * kept it for now to avoid breaking something non-obvious. */
9364 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9365 | HM_CHANGED_GUEST_DR7);
9366 /* Update the DR6 register if guest debug state is active, otherwise we'll
9367 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9368 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9369 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9370 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9371 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9372 }
9373 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9374 {
9375 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9376 rcStrict = VINF_SUCCESS;
9377 }
9378
9379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9380 }
9381 else
9382 {
9383 /*
9384 * Read DRx register into a general purpose register.
9385 */
9386 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9387 AssertMsg( rcStrict == VINF_SUCCESS
9388 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9389
9390 if (rcStrict == VINF_SUCCESS)
9391 {
9392 if (iGReg == X86_GREG_xSP)
9393 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9394 | HM_CHANGED_GUEST_RSP);
9395 else
9396 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9397 }
9398 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9399 {
9400 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9401 rcStrict = VINF_SUCCESS;
9402 }
9403
9404 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9405 }
9406
9407 return rcStrict;
9408}
9409
9410
9411/**
9412 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9413 * Conditional VM-exit.
9414 */
9415HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9416{
9417 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9418
9419#ifndef IN_NEM_DARWIN
9420 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9421
9422 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9423 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9424 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9425 | HMVMX_READ_IDT_VECTORING_INFO
9426 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9427 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9428
9429 /*
9430 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9431 */
9432 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9433 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9434 {
9435 /*
9436 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9437 * instruction emulation to inject the original event. Otherwise, injecting the original event
9438 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9439 */
9440 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9441 { /* likely */ }
9442 else
9443 {
9444 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9445# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9446 /** @todo NSTVMX: Think about how this should be handled. */
9447 if (pVmxTransient->fIsNestedGuest)
9448 return VERR_VMX_IPE_3;
9449# endif
9450 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9451 }
9452 }
9453 else
9454 {
9455 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9456 return rcStrict;
9457 }
9458
9459 /*
9460 * Get sufficient state and update the exit history entry.
9461 */
9462 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9463 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9464 AssertRCReturn(rc, rc);
9465
9466 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9467 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9468 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9469 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9470 if (!pExitRec)
9471 {
9472 /*
9473 * If we succeed, resume guest execution.
9474 * If we fail to interpret the instruction because we couldn't get the guest physical address
9475 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9476 * in the host TLB), we resume execution, which would cause a guest page fault to let the guest handle this
9477 * weird case. See @bugref{6043}.
9478 */
9479 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9480/** @todo bird: We can probably just go straight to IOM here and assume that
9481 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9482 * well. However, we need to address the aliasing workarounds that
9483 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9484 *
9485 * Might also be interesting to see if we can get this done more or
9486 * less locklessly inside IOM. Need to consider the lookup table
9487 * updating and use a bit more carefully first (or do all updates via
9488 * rendezvous) */
9489 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9490 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9491 if ( rcStrict == VINF_SUCCESS
9492 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9493 || rcStrict == VERR_PAGE_NOT_PRESENT)
9494 {
9495 /* Successfully handled MMIO operation. */
9496 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9497 | HM_CHANGED_GUEST_APIC_TPR);
9498 rcStrict = VINF_SUCCESS;
9499 }
9500 }
9501 else
9502 {
9503 /*
9504 * Frequent exit or something needing probing. Call EMHistoryExec.
9505 */
9506 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9507 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9508
9509 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9510 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9511
9512 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9513 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9514 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9515 }
9516 return rcStrict;
9517#else
9518 AssertFailed();
9519 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9520#endif
9521}
9522
9523
9524/**
9525 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9526 * VM-exit.
9527 */
9528HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9529{
9530 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9531#ifndef IN_NEM_DARWIN
9532 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9533
9534 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9535 | HMVMX_READ_EXIT_INSTR_LEN
9536 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9537 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9538 | HMVMX_READ_IDT_VECTORING_INFO
9539 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9540 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9541
9542 /*
9543 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9544 */
9545 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9547 {
9548 /*
9549 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9550 * we shall resolve the nested #PF and re-inject the original event.
9551 */
9552 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9553 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9554 }
9555 else
9556 {
9557 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9558 return rcStrict;
9559 }
9560
9561 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9562 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9563 AssertRCReturn(rc, rc);
9564
9565 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9566 uint64_t const uExitQual = pVmxTransient->uExitQual;
9567 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9568
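    /* Translate the EPT-violation exit qualification into a #PF-style error code for PGM:
       instruction fetch -> ID, write access -> RW, and any EPT permission bit set -> P. */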
9569 RTGCUINT uErrorCode = 0;
9570 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9571 uErrorCode |= X86_TRAP_PF_ID;
9572 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9573 uErrorCode |= X86_TRAP_PF_RW;
9574 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9575 uErrorCode |= X86_TRAP_PF_P;
9576
9577 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9578 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9579
9580 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9581
9582 /*
9583 * Handle the pagefault trap for the nested shadow table.
9584 */
9585 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9586 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9587 TRPMResetTrap(pVCpu);
9588
9589 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9590 if ( rcStrict == VINF_SUCCESS
9591 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9592 || rcStrict == VERR_PAGE_NOT_PRESENT)
9593 {
9594 /* Successfully synced our nested page tables. */
9595 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9596 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9597 return VINF_SUCCESS;
9598 }
9599 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9600 return rcStrict;
9601
9602#else /* IN_NEM_DARWIN */
9603 PVM pVM = pVCpu->CTX_SUFF(pVM);
9604 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9605 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9606 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9607 vmxHCImportGuestRip(pVCpu);
9608 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9609
9610 /*
9611 * Ask PGM for information about the given GCPhys. We need to check if we're
9612 * out of sync first.
9613 */
9614 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9615 false,
9616 false };
9617 PGMPHYSNEMPAGEINFO Info;
9618 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9619 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9620 if (RT_SUCCESS(rc))
9621 {
9622 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9623 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9624 {
9625 if (State.fCanResume)
9626 {
9627 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9628 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9629 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9630 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9631 State.fDidSomething ? "" : " no-change"));
9632 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9633 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9634 return VINF_SUCCESS;
9635 }
9636 }
9637
9638 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9639 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9640 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9641 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9642 State.fDidSomething ? "" : " no-change"));
9643 }
9644 else
9645 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9646 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9647 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9648
9649 /*
9650 * Emulate the memory access, either access handler or special memory.
9651 */
9652 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9653 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9654 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9655 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9656 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9657
9658 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9659 AssertRCReturn(rc, rc);
9660
9661 VBOXSTRICTRC rcStrict;
9662 if (!pExitRec)
9663 rcStrict = IEMExecOne(pVCpu);
9664 else
9665 {
9666 /* Frequent access or probing. */
9667 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9668 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9669 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9670 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9671 }
9672
9673 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9674
9675 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9676 return rcStrict;
9677#endif /* IN_NEM_DARWIN */
9678}
9679
9680#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9681
9682/**
9683 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9684 */
9685HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9686{
9687 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9688
9689 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9690 | HMVMX_READ_EXIT_INSTR_INFO
9691 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
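    /* The segment registers (and RSP, which may be referenced as a base register) are needed
       to decode the memory operand below, while CPUMCTX_EXTRN_HWVIRT pulls in the nested VMX
       state that IEM operates on. */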
9692 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9693 | CPUMCTX_EXTRN_SREG_MASK
9694 | CPUMCTX_EXTRN_HWVIRT
9695 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9696 AssertRCReturn(rc, rc);
9697
9698 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9699
9700 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9701 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9702
9703 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9704 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9705 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9706 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9707 {
9708 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9709 rcStrict = VINF_SUCCESS;
9710 }
9711 return rcStrict;
9712}
9713
9714
9715/**
9716 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9717 */
9718HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9719{
9720 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9721
9722 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
9723 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9724 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9725 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9726 AssertRCReturn(rc, rc);
9727
9728 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9729
9730 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9731 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9732 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9733 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9734 {
9735 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
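        /* If IEM has switched to VMX non-root operation, inform the caller so it can start
           executing the nested guest. */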
9736 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9737 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9738 }
9739 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9740 return rcStrict;
9741}
9742
9743
9744/**
9745 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9746 */
9747HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9748{
9749 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9750
9751 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9752 | HMVMX_READ_EXIT_INSTR_INFO
9753 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9754 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9755 | CPUMCTX_EXTRN_SREG_MASK
9756 | CPUMCTX_EXTRN_HWVIRT
9757 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9758 AssertRCReturn(rc, rc);
9759
9760 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9761
9762 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9763 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9764
9765 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9766 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9767 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9768 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9769 {
9770 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9771 rcStrict = VINF_SUCCESS;
9772 }
9773 return rcStrict;
9774}
9775
9776
9777/**
9778 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9779 */
9780HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9781{
9782 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9783
9784 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9785 | HMVMX_READ_EXIT_INSTR_INFO
9786 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9787 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9788 | CPUMCTX_EXTRN_SREG_MASK
9789 | CPUMCTX_EXTRN_HWVIRT
9790 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9791 AssertRCReturn(rc, rc);
9792
9793 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9794
9795 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9796 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9797
9798 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9799 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9800 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9801 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9802 {
9803 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9804 rcStrict = VINF_SUCCESS;
9805 }
9806 return rcStrict;
9807}
9808
9809
9810/**
9811 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9812 */
9813HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9814{
9815 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9816
9817 /*
9818 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9819 * thus might not need to import the shadow VMCS state. However, it's safer to do so
9820 * just in case code elsewhere dares look at unsynced VMCS fields.
9821 */
9822 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9823 | HMVMX_READ_EXIT_INSTR_INFO
9824 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9825 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9826 | CPUMCTX_EXTRN_SREG_MASK
9827 | CPUMCTX_EXTRN_HWVIRT
9828 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9829 AssertRCReturn(rc, rc);
9830
9831 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9832
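    /* Only decode the effective address when the VMREAD destination is a memory operand;
       a register destination needs no effective-address calculation. */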
9833 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9834 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9835 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9836
9837 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9838 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9839 {
9840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9841
9842# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9843 /* Try for exit optimization. This is on the following instruction
9844 because it would be a waste of time to have to reinterpret the
9845 already decoded vmread instruction. */
9846 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9847 if (pExitRec)
9848 {
9849 /* Frequent access or probing. */
9850 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9851 AssertRCReturn(rc, rc);
9852
9853 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9854 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9855 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9856 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9857 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9858 }
9859# endif
9860 }
9861 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9862 {
9863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9864 rcStrict = VINF_SUCCESS;
9865 }
9866 return rcStrict;
9867}
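
/*
 * The fCtxChanged updates in the handlers above and below feed HM's dirty-state
 * tracking, which decides what must be exported back to the VMCS before the next
 * VM-entry. A minimal sketch of that idea only (hypothetical SKETCH_* names and
 * types, not the real HM_CHANGED_* machinery):
 */
#if 0
#define SKETCH_CHANGED_RIP      UINT64_C(0x1)
#define SKETCH_CHANGED_RFLAGS   UINT64_C(0x2)

typedef struct SKETCHVCPU { uint64_t fCtxChanged; } SKETCHVCPU;

/* An emulation step records which pieces of guest state it has modified... */
static void sketchAfterEmulation(SKETCHVCPU *pVCpu)
{
    pVCpu->fCtxChanged |= SKETCH_CHANGED_RIP | SKETCH_CHANGED_RFLAGS;
}

/* ...so the pre-entry export only needs to write back fields whose bit is set. */
static bool sketchNeedsRipExport(SKETCHVCPU const *pVCpu)
{
    return (pVCpu->fCtxChanged & SKETCH_CHANGED_RIP) != 0;
}
#endif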
9868
9869
9870/**
9871 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9872 */
9873HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9874{
9875 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9876
9877 /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9878 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9879 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9880 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9881 AssertRCReturn(rc, rc);
9882
9883 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9884
9885 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9886 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9887 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9888 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9889 {
9890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9891 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9892 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9893 }
9894 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9895 return rcStrict;
9896}
9897
9898
9899/**
9900 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9901 */
9902HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9903{
9904 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9905
9906 /*
9907 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9908 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9909 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9910 */
9911 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9912 | HMVMX_READ_EXIT_INSTR_INFO
9913 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9914 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9915 | CPUMCTX_EXTRN_SREG_MASK
9916 | CPUMCTX_EXTRN_HWVIRT
9917 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9918 AssertRCReturn(rc, rc);
9919
9920 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9921
9922 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9923 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9924 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9925
9926 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9927 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9928 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9929 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9930 {
9931 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9932 rcStrict = VINF_SUCCESS;
9933 }
9934 return rcStrict;
9935}
9936
9937
9938/**
9939 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9940 */
9941HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9942{
9943 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9944
9945 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9946 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9947 | CPUMCTX_EXTRN_HWVIRT
9948 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9949 AssertRCReturn(rc, rc);
9950
9951 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9952
9953 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9954 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9955 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9956 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9957 {
9958 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9959 rcStrict = VINF_SUCCESS;
9960 }
9961 return rcStrict;
9962}
9963
9964
9965/**
9966 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9967 */
9968HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9969{
9970 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9971
9972 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9973 | HMVMX_READ_EXIT_INSTR_INFO
9974 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9975 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9976 | CPUMCTX_EXTRN_SREG_MASK
9977 | CPUMCTX_EXTRN_HWVIRT
9978 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9979 AssertRCReturn(rc, rc);
9980
9981 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9982
9983 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9984 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9985
9986 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9987 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9988 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9989 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9990 {
9991 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9992 rcStrict = VINF_SUCCESS;
9993 }
9994 return rcStrict;
9995}
9996
9997
9998/**
9999 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10000 */
10001HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10002{
10003 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10004
10005 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10006 | HMVMX_READ_EXIT_INSTR_INFO
10007 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10008 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10009 | CPUMCTX_EXTRN_SREG_MASK
10010 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10011 AssertRCReturn(rc, rc);
10012
10013 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10014
10015 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10016 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10017
10018 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10019 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10020 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10021 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10022 {
10023 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10024 rcStrict = VINF_SUCCESS;
10025 }
10026 return rcStrict;
10027}
10028
10029
10030# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10031/**
10032 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10033 */
10034HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10035{
10036 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10037
10038 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10039 | HMVMX_READ_EXIT_INSTR_INFO
10040 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10041 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10042 | CPUMCTX_EXTRN_SREG_MASK
10043 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10044 AssertRCReturn(rc, rc);
10045
10046 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10047
10048 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10049 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10050
10051 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10052 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10053 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10054 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10055 {
10056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10057 rcStrict = VINF_SUCCESS;
10058 }
10059 return rcStrict;
10060}
10061# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10062#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10063/** @} */
10064
10065
10066#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10067/** @name Nested-guest VM-exit handlers.
10068 * @{
10069 */
10070/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10071/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10072/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10073
10074/**
10075 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10076 * Conditional VM-exit.
10077 */
10078HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10079{
10080 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10081
10082 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10083
10084 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10085 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10086 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10087
10088 switch (uExitIntType)
10089 {
10090# ifndef IN_NEM_DARWIN
10091 /*
10092 * Physical NMIs:
10093 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
10094 */
10095 case VMX_EXIT_INT_INFO_TYPE_NMI:
10096 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10097# endif
10098
10099 /*
10100 * Hardware exceptions,
10101 * Software exceptions,
10102 * Privileged software exceptions:
10103 * Figure out if the exception must be delivered to the guest or the nested-guest.
10104 */
10105 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10106 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10107 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10108 {
10109 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10110 | HMVMX_READ_EXIT_INSTR_LEN
10111 | HMVMX_READ_IDT_VECTORING_INFO
10112 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10113
10114 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10115 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10116 {
10117 /* Exit qualification is required for debug and page-fault exceptions. */
10118 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10119
10120 /*
10121 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10122 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10123 * length. However, if delivery of a software interrupt, software exception or privileged
10124 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10125 */
10126 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10127 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10128 pVmxTransient->uExitIntErrorCode,
10129 pVmxTransient->uIdtVectoringInfo,
10130 pVmxTransient->uIdtVectoringErrorCode);
10131#ifdef DEBUG_ramshankar
10132 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10133 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10134 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10135 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10136 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10137 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10138#endif
10139 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10140 }
10141
10142 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10143 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10144 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10145 }
10146
10147 /*
10148 * Software interrupts:
10149 * VM-exits cannot be caused by software interrupts.
10150 *
10151 * External interrupts:
10152 * This should only happen when the "acknowledge external interrupts on VM-exit"
10153 * control is set. However, we never set this when executing a guest or
10154 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10155 * the guest.
10156 */
10157 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10158 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10159 default:
10160 {
10161 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10162 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10163 }
10164 }
10165}
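
/*
 * The CPUMIsGuestVmxXcptInterceptSet() decision above follows the SDM's
 * exception-bitmap rules: an exception is intercepted when its vector's bit is
 * set in the bitmap, with #PF additionally filtered by the PFEC_MASK/PFEC_MATCH
 * fields. A simplified, self-contained sketch of just that rule (hypothetical
 * sketch* name; other details of the real helper are omitted):
 */
#if 0
static bool sketchIsXcptIntercepted(uint32_t uXcptBitmap, uint32_t uPfecMask, uint32_t uPfecMatch,
                                    uint8_t uVector, uint32_t uErrCode)
{
    bool const fBitmapHit = RT_BOOL(uXcptBitmap & RT_BIT_32(uVector));
    if (uVector != X86_XCPT_PF)
        return fBitmapHit;
    /* For #PF: if (PFEC & PFEC_MASK) == PFEC_MATCH the bitmap bit is honoured
       as-is, otherwise its meaning is inverted. */
    bool const fPfecEqual = (uErrCode & uPfecMask) == uPfecMatch;
    return fBitmapHit == fPfecEqual;
}
#endif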
10166
10167
10168/**
10169 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10170 * Unconditional VM-exit.
10171 */
10172HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10173{
10174 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10175 return IEMExecVmxVmexitTripleFault(pVCpu);
10176}
10177
10178
10179/**
10180 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10181 */
10182HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10183{
10184 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10185
10186 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10187 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10188 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10189}
10190
10191
10192/**
10193 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10194 */
10195HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10196{
10197 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10198
10199 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10200 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10201 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10202}
10203
10204
10205/**
10206 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10207 * Unconditional VM-exit.
10208 */
10209HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10210{
10211 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10212
10213 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10214 | HMVMX_READ_EXIT_INSTR_LEN
10215 | HMVMX_READ_IDT_VECTORING_INFO
10216 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10217
10218 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10219 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10220 pVmxTransient->uIdtVectoringErrorCode);
10221 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10222}
10223
10224
10225/**
10226 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10227 */
10228HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10229{
10230 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10231
10232 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10233 {
10234 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10235 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10236 }
10237 return vmxHCExitHlt(pVCpu, pVmxTransient);
10238}
10239
10240
10241/**
10242 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10243 */
10244HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10245{
10246 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10247
10248 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10249 {
10250 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10251 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10252 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10253 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10254 }
10255 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10256}
10257
10258
10259/**
10260 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10261 */
10262HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10263{
10264 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10265
10266 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10267 {
10268 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10269 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10270 }
10271 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10272}
10273
10274
10275/**
10276 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10277 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10278 */
10279HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10280{
10281 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10282
10283 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10284 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10285
10286 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10287
10288 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10289 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10290 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10291
10292 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10293 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10294 u64VmcsField &= UINT64_C(0xffffffff);
10295
10296 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10297 {
10298 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10299 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10300 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10301 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10302 }
10303
10304 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10305 return vmxHCExitVmread(pVCpu, pVmxTransient);
10306 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10307}
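
/*
 * CPUMIsGuestVmxVmreadVmwriteInterceptSet() above consults the nested-guest's
 * VMREAD/VMWRITE bitmap for the field encoding in question. Per the SDM, bits
 * 14:0 of the field encoding index a bit in a 4K bitmap and a set bit means the
 * access causes a VM-exit. A simplified sketch of such a lookup (hypothetical
 * sketch* name):
 */
#if 0
static bool sketchIsVmreadVmwriteBitSet(uint8_t const *pbBitmap, uint64_t u64VmcsField)
{
    uint16_t const idxBit = (uint16_t)(u64VmcsField & UINT64_C(0x7fff)); /* bits 14:0 */
    return RT_BOOL(pbBitmap[idxBit / 8] & (1 << (idxBit % 8)));
}
#endif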
10308
10309
10310/**
10311 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10312 */
10313HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10314{
10315 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10316
10317 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10318 {
10319 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10320 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10321 }
10322
10323 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10324}
10325
10326
10327/**
10328 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10329 * Conditional VM-exit.
10330 */
10331HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10332{
10333 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10334
10335 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10336 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10337
10338 VBOXSTRICTRC rcStrict;
10339 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10340 switch (uAccessType)
10341 {
10342 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10343 {
10344 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10345 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10346 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10347 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10348
10349 bool fIntercept;
10350 switch (iCrReg)
10351 {
10352 case 0:
10353 case 4:
10354 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10355 break;
10356
10357 case 3:
10358 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10359 break;
10360
10361 case 8:
10362 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10363 break;
10364
10365 default:
10366 fIntercept = false;
10367 break;
10368 }
10369 if (fIntercept)
10370 {
10371 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10372 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10373 }
10374 else
10375 {
10376 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10377 AssertRCReturn(rc, rc);
10378 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10379 }
10380 break;
10381 }
10382
10383 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10384 {
10385 /*
10386 * CR0/CR4 reads do not cause VM-exits; the read shadow is used instead (subject to masking).
10387 * CR2 reads do not cause a VM-exit.
10388 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10389 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10390 */
10391 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10392 if ( iCrReg == 3
10393 || iCrReg == 8)
10394 {
10395 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10396 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10397 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10398 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10399 {
10400 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10401 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10402 }
10403 else
10404 {
10405 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10406 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10407 }
10408 }
10409 else
10410 {
10411 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10412 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10413 }
10414 break;
10415 }
10416
10417 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10418 {
10419 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10420 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10421 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10422 if ( (uGstHostMask & X86_CR0_TS)
10423 && (uReadShadow & X86_CR0_TS))
10424 {
10425 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10426 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10427 }
10428 else
10429 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10430 break;
10431 }
10432
10433 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10434 {
10435 RTGCPTR GCPtrEffDst;
10436 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10437 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10438 if (fMemOperand)
10439 {
10440 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10441 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10442 }
10443 else
10444 GCPtrEffDst = NIL_RTGCPTR;
10445
10446 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10447 {
10448 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10449 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10450 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10451 }
10452 else
10453 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10454 break;
10455 }
10456
10457 default:
10458 {
10459 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10460 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10461 }
10462 }
10463
10464 if (rcStrict == VINF_IEM_RAISED_XCPT)
10465 {
10466 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10467 rcStrict = VINF_SUCCESS;
10468 }
10469 return rcStrict;
10470}
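
/*
 * The MOV-to-CR0/CR4 intercept decision in the handler above follows the SDM's
 * guest/host mask rule: the write causes a VM-exit when it would give a
 * host-owned bit (mask bit set) a value different from the read shadow. The
 * CLTS check above is the same rule specialised to CR0.TS. A minimal sketch of
 * just that rule, ignoring fixed-bit and mode-change details (hypothetical
 * sketch* name):
 */
#if 0
static bool sketchIsMovToCrXIntercepted(uint64_t uGstHostMask, uint64_t uReadShadow, uint64_t uNewCrX)
{
    return ((uNewCrX ^ uReadShadow) & uGstHostMask) != 0;
}
#endif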
10471
10472
10473/**
10474 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10475 * Conditional VM-exit.
10476 */
10477HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10478{
10479 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10480
10481 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10482 {
10483 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10484 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10485 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10486 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10487 }
10488 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10489}
10490
10491
10492/**
10493 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10494 * Conditional VM-exit.
10495 */
10496HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10497{
10498 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10499
10500 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10501
10502 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10503 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10504 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10505
10506 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10507 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10508 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10509 {
10510 /*
10511 * IN/OUT instruction:
10512 * - Provides VM-exit instruction length.
10513 *
10514 * INS/OUTS instruction:
10515 * - Provides VM-exit instruction length.
10516 * - Provides Guest-linear address.
10517 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10518 */
10519 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10520 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10521
10522 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10523 pVmxTransient->ExitInstrInfo.u = 0;
10524 pVmxTransient->uGuestLinearAddr = 0;
10525
10526 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10527 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10528 if (fIOString)
10529 {
10530 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10531 if (fVmxInsOutsInfo)
10532 {
10533 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10534 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10535 }
10536 }
10537
10538 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10539 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10540 }
10541 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10542}
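
/*
 * CPUMIsGuestVmxIoInterceptSet() above consults the nested-guest I/O bitmaps.
 * Per the SDM, bitmap A covers ports 0x0000-0x7fff, bitmap B covers ports
 * 0x8000-0xffff, the access exits if the bit of any byte it touches is set, and
 * an access wrapping past 0xffff exits unconditionally. A simplified sketch of
 * such a lookup (hypothetical sketch* name):
 */
#if 0
static bool sketchIsIoIntercepted(uint8_t const *pbIoBitmapA, uint8_t const *pbIoBitmapB,
                                  uint16_t uPort, uint8_t cbAccess)
{
    if ((uint32_t)uPort + cbAccess > UINT32_C(0x10000))
        return true;                                            /* wraps around the port space */
    for (uint8_t off = 0; off < cbAccess; off++)
    {
        uint16_t const uThisPort = (uint16_t)(uPort + off);
        uint8_t const *pbBitmap  = uThisPort < UINT16_C(0x8000) ? pbIoBitmapA : pbIoBitmapB;
        uint16_t const idxBit    = uThisPort & UINT16_C(0x7fff);
        if (pbBitmap[idxBit / 8] & (1 << (idxBit % 8)))
            return true;
    }
    return false;
}
#endif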
10543
10544
10545/**
10546 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10547 */
10548HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10549{
10550 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10551
10552 uint32_t fMsrpm;
10553 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10554 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10555 else
10556 fMsrpm = VMXMSRPM_EXIT_RD;
10557
10558 if (fMsrpm & VMXMSRPM_EXIT_RD)
10559 {
10560 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10561 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10562 }
10563 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10564}
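
/*
 * CPUMGetVmxMsrPermission() above (and in the WRMSR handler below) consults the
 * nested-guest MSR bitmap. Per the SDM the bitmap consists of four 1K blocks:
 * read-low, read-high, write-low and write-high, where "high" covers the
 * 0xc0000000..0xc0001fff range and MSRs outside both ranges always exit. A
 * simplified sketch of the read-permission half only (hypothetical sketch* name):
 */
#if 0
static bool sketchIsMsrReadIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBitmap, iBit;
    if (idMsr <= UINT32_C(0x00001fff))
    {
        offBitmap = 0x000;                                      /* read bitmap, low MSRs */
        iBit      = idMsr;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap = 0x400;                                      /* read bitmap, high MSRs */
        iBit      = idMsr - UINT32_C(0xc0000000);
    }
    else
        return true;                                            /* not covered by the bitmap */
    return RT_BOOL(pbMsrBitmap[offBitmap + iBit / 8] & (1 << (iBit % 8)));
}
#endif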
10565
10566
10567/**
10568 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10569 */
10570HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10571{
10572 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10573
10574 uint32_t fMsrpm;
10575 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10576 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10577 else
10578 fMsrpm = VMXMSRPM_EXIT_WR;
10579
10580 if (fMsrpm & VMXMSRPM_EXIT_WR)
10581 {
10582 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10583 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10584 }
10585 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10586}
10587
10588
10589/**
10590 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10591 */
10592HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10593{
10594 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10595
10596 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10597 {
10598 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10599 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10600 }
10601 return vmxHCExitMwait(pVCpu, pVmxTransient);
10602}
10603
10604
10605/**
10606 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10607 * VM-exit.
10608 */
10609HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10610{
10611 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10612
10613 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10614 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10615 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10616 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10617}
10618
10619
10620/**
10621 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10622 */
10623HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10624{
10625 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10626
10627 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10628 {
10629 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10630 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10631 }
10632 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10633}
10634
10635
10636/**
10637 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10638 */
10639HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10640{
10641 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10642
10643 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10644 * PAUSE when executing a nested-guest? If it does not, we would not need
10645 * to check for the intercepts here. Just call VM-exit... */
10646
10647 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10648 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10649 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10650 {
10651 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10652 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10653 }
10654 return vmxHCExitPause(pVCpu, pVmxTransient);
10655}
10656
10657
10658/**
10659 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10660 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10661 */
10662HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10663{
10664 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10665
10666 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10667 {
10668 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10669 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10670 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10671 }
10672 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10673}
10674
10675
10676/**
10677 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10678 * VM-exit.
10679 */
10680HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10681{
10682 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10683
10684 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10685 | HMVMX_READ_EXIT_INSTR_LEN
10686 | HMVMX_READ_IDT_VECTORING_INFO
10687 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10688
10689 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10690
10691 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10692 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10693
10694 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10695 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10696 pVmxTransient->uIdtVectoringErrorCode);
10697 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10698}
10699
10700
10701/**
10702 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10703 * Conditional VM-exit.
10704 */
10705HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10706{
10707 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10708
10709 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10710 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10711 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10712}
10713
10714
10715/**
10716 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10717 * Conditional VM-exit.
10718 */
10719HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10720{
10721 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10722
10723 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10724 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10725 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10726}
10727
10728
10729/**
10730 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10731 */
10732HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10733{
10734 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10735
10736 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10737 {
10738 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10739 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10740 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10741 }
10742 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10743}
10744
10745
10746/**
10747 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10748 */
10749HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10750{
10751 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10752
10753 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10754 {
10755 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10756 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10757 }
10758 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10759}
10760
10761
10762/**
10763 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10764 */
10765HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10766{
10767 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10768
10769 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10770 {
10771 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10772 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10773 | HMVMX_READ_EXIT_INSTR_INFO
10774 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10775 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10776 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10777 }
10778 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10779}
10780
10781
10782/**
10783 * Nested-guest VM-exit handler for invalid-guest state
10784 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10785 */
10786HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10787{
10788 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10789
10790 /*
10791 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10792 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10793 * Handle it as if the outer guest were in an invalid guest state.
10794 *
10795 * When the fast path is implemented, this should be changed to cause the corresponding
10796 * nested-guest VM-exit.
10797 */
10798 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10799}
10800
10801
10802/**
10803 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10804 * and only provide the instruction length.
10805 *
10806 * Unconditional VM-exit.
10807 */
10808HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10809{
10810 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10811
10812#ifdef VBOX_STRICT
10813 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10814 switch (pVmxTransient->uExitReason)
10815 {
10816 case VMX_EXIT_ENCLS:
10817 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10818 break;
10819
10820 case VMX_EXIT_VMFUNC:
10821 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10822 break;
10823 }
10824#endif
10825
10826 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10827 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10828}
10829
10830
10831/**
10832 * Nested-guest VM-exit handler for instructions that provide instruction length as
10833 * well as more information.
10834 *
10835 * Unconditional VM-exit.
10836 */
10837HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10838{
10839 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10840
10841# ifdef VBOX_STRICT
10842 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10843 switch (pVmxTransient->uExitReason)
10844 {
10845 case VMX_EXIT_GDTR_IDTR_ACCESS:
10846 case VMX_EXIT_LDTR_TR_ACCESS:
10847 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10848 break;
10849
10850 case VMX_EXIT_RDRAND:
10851 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10852 break;
10853
10854 case VMX_EXIT_RDSEED:
10855 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10856 break;
10857
10858 case VMX_EXIT_XSAVES:
10859 case VMX_EXIT_XRSTORS:
10860 /** @todo NSTVMX: Verify XSS-bitmap. */
10861 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10862 break;
10863
10864 case VMX_EXIT_UMWAIT:
10865 case VMX_EXIT_TPAUSE:
10866 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10867 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10868 break;
10869
10870 case VMX_EXIT_LOADIWKEY:
10871 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10872 break;
10873 }
10874# endif
10875
10876 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10877 | HMVMX_READ_EXIT_INSTR_LEN
10878 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10879 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10880 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10881}
10882
10883# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10884
10885/**
10886 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10887 * Conditional VM-exit.
10888 */
10889HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10890{
10891 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10892 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10893
10894 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10895 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10896 {
10897 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10898 | HMVMX_READ_EXIT_INSTR_LEN
10899 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10900 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10901 | HMVMX_READ_IDT_VECTORING_INFO
10902 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10903 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10904 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10905 AssertRCReturn(rc, rc);
10906
10907 /*
10908 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10909 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10910 * it becomes its problem to deal with and we'll clear the recovered event.
10911 */
10912 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10913 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10914 { /*likely*/ }
10915 else
10916 {
10917 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10918 return rcStrict;
10919 }
10920 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10921
10922 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10923 uint64_t const uExitQual = pVmxTransient->uExitQual;
10924
10925 RTGCPTR GCPtrNestedFault;
10926 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10927 if (fIsLinearAddrValid)
10928 {
10929 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10930 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10931 }
10932 else
10933 GCPtrNestedFault = 0;
10934
10935 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10936 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10937 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10938 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10939 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10940
10941 PGMPTWALK Walk;
10942 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10943 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10944 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10945 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10946 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10947 if (RT_SUCCESS(rcStrict))
10948 {
10949 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
10950 {
10951 Assert(!fClearEventOnForward);
10952 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
10953 rcStrict = VINF_EM_RESCHEDULE_REM;
10954 }
10955 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10956 return rcStrict;
10957 }
10958
10959 if (fClearEventOnForward)
10960 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10961
10962 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10963 pVmxTransient->uIdtVectoringErrorCode);
10964 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10965 {
10966 VMXVEXITINFO const ExitInfo
10967 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10968 pVmxTransient->uExitQual,
10969 pVmxTransient->cbExitInstr,
10970 pVmxTransient->uGuestLinearAddr,
10971 pVmxTransient->uGuestPhysicalAddr);
10972 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10973 }
10974
10975 AssertMsgReturn(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG,
10976 ("uErr=%#RX32 uExitQual=%#RX64 GCPhysNestedFault=%#RGp GCPtrNestedFault=%#RGv\n",
10977 (uint32_t)uErr, uExitQual, GCPhysNestedFault, GCPtrNestedFault),
10978 rcStrict);
10979 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10980 }
10981
10982 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10983}
10984
10985
10986/**
10987 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10988 * Conditional VM-exit.
10989 */
10990HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10991{
10992 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10993 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10994
10995 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10996 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10997 {
10998 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10999 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11000 AssertRCReturn(rc, rc);
11001
11002 PGMPTWALK Walk;
11003 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11004 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11005 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11006 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11007 0 /* GCPtrNestedFault */, &Walk);
11008 if (RT_SUCCESS(rcStrict))
11009 {
11010 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11011 return rcStrict;
11012 }
11013
11014 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11015 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11016 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11017
11018 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11019 pVmxTransient->uIdtVectoringErrorCode);
11020 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11021 }
11022
11023 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11024}
11025
11026# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11027
11028/** @} */
11029#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11030
11031
11032/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11033 * probes.
11034 *
11035 * The following few functions and associated structure contain the bloat
11036 * necessary for providing detailed debug events and dtrace probes as well as
11037 * reliable host side single stepping. This works on the principle of
11038 * "subclassing" the normal execution loop and workers. We replace the loop
11039 * method completely and override selected helpers to add necessary adjustments
11040 * to their core operation.
11041 *
11042 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11043 * any performance for debug and analysis features.
11044 *
11045 * @{
11046 */
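
/*
 * The helpers below are meant to be strung together by the debug run loop in
 * roughly the following order. This is only a condensed sketch of the flow (the
 * real loop adds guest entry, VM-exit dispatching, statistics and single-step
 * handling; sketchDebugRunLoop is a hypothetical name):
 */
#if 0
static VBOXSTRICTRC sketchDebugRunLoop(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    VMXRUNDBGSTATE DbgState;
    vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
    vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (;;)
    {
        /* Apply the extra intercepts right before entering the guest (interrupts disabled). */
        vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);
        /* ... enter the guest, then dispatch the VM-exit ... */
        /* ... refresh via vmxHCPreRunGuestDebugStateUpdate() when DBGF/DTrace settings change ... */
        break;  /* placeholder so the sketch terminates */
    }
    return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
}
#endif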
11047
11048/**
11049 * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
11050 * the debug run loop.
11051 */
11052typedef struct VMXRUNDBGSTATE
11053{
11054 /** The RIP we started executing at. This is for detecting that we stepped. */
11055 uint64_t uRipStart;
11056 /** The CS we started executing with. */
11057 uint16_t uCsStart;
11058
11059 /** Whether we've actually modified the 1st execution control field. */
11060 bool fModifiedProcCtls : 1;
11061 /** Whether we've actually modified the 2nd execution control field. */
11062 bool fModifiedProcCtls2 : 1;
11063 /** Whether we've actually modified the exception bitmap. */
11064 bool fModifiedXcptBitmap : 1;
11065
11066 /** We desire the modified CR0 mask to be cleared. */
11067 bool fClearCr0Mask : 1;
11068 /** We desire the modified CR4 mask to be cleared. */
11069 bool fClearCr4Mask : 1;
11070 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11071 uint32_t fCpe1Extra;
11072 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11073 uint32_t fCpe1Unwanted;
11074 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11075 uint32_t fCpe2Extra;
11076 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11077 uint32_t bmXcptExtra;
11078 /** The sequence number of the Dtrace provider settings the state was
11079 * configured against. */
11080 uint32_t uDtraceSettingsSeqNo;
11081 /** VM-exits to check (one bit per VM-exit). */
11082 uint32_t bmExitsToCheck[3];
11083
11084 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11085 uint32_t fProcCtlsInitial;
11086 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11087 uint32_t fProcCtls2Initial;
11088 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11089 uint32_t bmXcptInitial;
11090} VMXRUNDBGSTATE;
11091AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11092typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11093
11094
11095/**
11096 * Initializes the VMXRUNDBGSTATE structure.
11097 *
11098 * @param pVCpu The cross context virtual CPU structure of the
11099 * calling EMT.
11100 * @param pVmxTransient The VMX-transient structure.
11101 * @param pDbgState The debug state to initialize.
11102 */
11103static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11104{
11105 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11106 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11107
11108 pDbgState->fModifiedProcCtls = false;
11109 pDbgState->fModifiedProcCtls2 = false;
11110 pDbgState->fModifiedXcptBitmap = false;
11111 pDbgState->fClearCr0Mask = false;
11112 pDbgState->fClearCr4Mask = false;
11113 pDbgState->fCpe1Extra = 0;
11114 pDbgState->fCpe1Unwanted = 0;
11115 pDbgState->fCpe2Extra = 0;
11116 pDbgState->bmXcptExtra = 0;
11117 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11118 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11119 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11120}
11121
11122
11123/**
11124 * Updates the VMCS fields with changes requested by @a pDbgState.
11125 *
11126 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11127 * immediately before executing guest code, i.e. when interrupts are disabled.
11128 * We don't check status codes here as we cannot easily assert or return in the
11129 * latter case.
11130 *
11131 * @param pVCpu The cross context virtual CPU structure.
11132 * @param pVmxTransient The VMX-transient structure.
11133 * @param pDbgState The debug state.
11134 */
11135static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11136{
11137 /*
11138 * Ensure desired flags in VMCS control fields are set.
11139 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11140 *
11141 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11142 * there should be no stale data in pCtx at this point.
11143 */
11144 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11145 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11146 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11147 {
11148 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11149 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11150 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11151 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11152 pDbgState->fModifiedProcCtls = true;
11153 }
11154
11155 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11156 {
11157 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11158 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11159 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11160 pDbgState->fModifiedProcCtls2 = true;
11161 }
11162
11163 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11164 {
11165 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11166 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11167 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11168 pDbgState->fModifiedXcptBitmap = true;
11169 }
11170
11171 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11172 {
11173 pVmcsInfo->u64Cr0Mask = 0;
11174 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11175 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11176 }
11177
11178 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11179 {
11180 pVmcsInfo->u64Cr4Mask = 0;
11181 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11182 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11183 }
11184
11185 NOREF(pVCpu);
11186}
11187
11188
11189/**
11190 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11191 * re-entry next time around.
11192 *
11193 * @returns Strict VBox status code (i.e. informational status codes too).
11194 * @param pVCpu The cross context virtual CPU structure.
11195 * @param pVmxTransient The VMX-transient structure.
11196 * @param pDbgState The debug state.
11197 * @param rcStrict The return code from executing the guest using single
11198 * stepping.
11199 */
11200static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11201 VBOXSTRICTRC rcStrict)
11202{
11203 /*
11204 * Restore VM-exit control settings as we may not reenter this function the
11205 * next time around.
11206 */
11207 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11208
11209 /* We reload the initial value and trigger what recalculations we can the
11210 next time around. From the looks of things, that's all that's required atm. */
11211 if (pDbgState->fModifiedProcCtls)
11212 {
11213 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11214 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11215 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11216 AssertRC(rc2);
11217 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11218 }
11219
11220 /* We're currently the only ones messing with this one, so just restore the
11221 cached value and reload the field. */
11222 if ( pDbgState->fModifiedProcCtls2
11223 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11224 {
11225 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11226 AssertRC(rc2);
11227 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11228 }
11229
11230 /* If we've modified the exception bitmap, we restore it and trigger
11231 reloading and partial recalculation the next time around. */
11232 if (pDbgState->fModifiedXcptBitmap)
11233 {
11234 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11235 AssertRC(rc2);
11236 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11237 }
11238
11239 return rcStrict;
11240}
11241
11242
11243/**
11244 * Configures VM-exit controls for current DBGF and DTrace settings.
11245 *
11246 * This updates @a pDbgState and the VMCS execution control fields to reflect
11247 * the necessary VM-exits demanded by DBGF and DTrace.
11248 *
11249 * @param pVCpu The cross context virtual CPU structure.
11250 * @param pVmxTransient The VMX-transient structure. May update
11251 * fUpdatedTscOffsettingAndPreemptTimer.
11252 * @param pDbgState The debug state.
11253 */
11254static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11255{
11256#ifndef IN_NEM_DARWIN
11257 /*
11258 * Record the DTrace settings sequence number so we can spot changes.
11259 */
11260 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11261 ASMCompilerBarrier();
11262#endif
11263
11264 /*
11265 * We'll rebuild most of the middle block of data members (holding the
11266 * current settings) as we go along here, so start by clearing it all.
11267 */
11268 pDbgState->bmXcptExtra = 0;
11269 pDbgState->fCpe1Extra = 0;
11270 pDbgState->fCpe1Unwanted = 0;
11271 pDbgState->fCpe2Extra = 0;
11272 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11273 pDbgState->bmExitsToCheck[i] = 0;
11274
11275 /*
11276 * Software interrupts (INT XXh) - no idea how to trigger these...
11277 */
11278 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11279 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11280 || VBOXVMM_INT_SOFTWARE_ENABLED())
11281 {
11282 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11283 }
11284
11285 /*
11286 * INT3 breakpoints - triggered by #BP exceptions.
11287 */
11288 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11289 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11290
11291 /*
11292 * Exception bitmap and XCPT events+probes.
11293 */
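    /* Note: the loop below assumes DBGFEVENT_XCPT_FIRST..DBGFEVENT_XCPT_LAST are laid out in
       x86 exception-vector order, so the loop index doubles as the vector number (the same
       assumption is made in the XCPT_OR_NMI handling of vmxHCHandleExitDtraceEvents). */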
11294 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11295 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11296 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11297
11298 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11299 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11300 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11301 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11302 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11303 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11304 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11305 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11306 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11307 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11308 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11309 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11310 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11311 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11312 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11313 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11314 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11315 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11316
11317 if (pDbgState->bmXcptExtra)
11318 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11319
11320 /*
11321 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11322 *
11323 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
11324 * So, when adding/changing/removing entries, don't forget to update it as well.
11325 *
11326 * Some of the macros pick up local variables to save horizontal space
11327 * (being able to read the entries as a table is the lesser evil here).
11328 */
11329#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11330 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11331 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11332#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11333 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11334 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11335 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11336 } else do { } while (0)
11337#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11338 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11339 { \
11340 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11341 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11342 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11343 } else do { } while (0)
11344#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11345 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11346 { \
11347 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11348 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11349 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11350 } else do { } while (0)
11351#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11352 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11353 { \
11354 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11355 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11356 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11357 } else do { } while (0)
11358
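    /* Example reading of the table below: SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT,
       VMX_PROC_CTLS_HLT_EXIT) requests the HLT-exiting execution control and marks the
       VMX_EXIT_HLT exit for checking whenever either the DBGFEVENT_INSTR_HALT event or the
       VBOXVMM_INSTR_HALT dtrace probe is enabled. */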
11359 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11360 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11361 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11362 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11363 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11364
11365 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11366 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11367 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11368 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11369 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11370 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11371 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11372 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11373 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11374 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11375 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11376 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11377 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11378 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11379 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11380 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11381 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11382 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11383 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11384 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11385 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11386 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11387 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11388 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11389 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11390 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11391 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11392 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11393 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11394 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11395 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11396 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11397 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11398 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11399 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11400 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11401
11402 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11403 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11404 {
11405 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11406 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11407 AssertRC(rc);
11408
11409#if 0 /** @todo fix me */
11410 pDbgState->fClearCr0Mask = true;
11411 pDbgState->fClearCr4Mask = true;
11412#endif
11413 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11414 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11415 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11416 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11417 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11418 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11419 require clearing here and in the loop if we start using it. */
11420 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11421 }
11422 else
11423 {
11424 if (pDbgState->fClearCr0Mask)
11425 {
11426 pDbgState->fClearCr0Mask = false;
11427 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11428 }
11429 if (pDbgState->fClearCr4Mask)
11430 {
11431 pDbgState->fClearCr4Mask = false;
11432 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11433 }
11434 }
11435 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11436 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11437
11438 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11439 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11440 {
11441 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11442 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11443 }
11444 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11445 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11446
11447 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11448 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11449 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11450 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11451 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11452 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11453 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11454 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11455#if 0 /** @todo too slow, fix handler. */
11456 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11457#endif
11458 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11459
11460 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11461 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11462 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11463 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11464 {
11465 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11466 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11467 }
11468 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11469 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11470 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11471 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11472
11473 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11474 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11475 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11476 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11477 {
11478 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11479 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11480 }
11481 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11482 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11483 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11484 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11485
11486 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11487 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11488 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11489 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11490 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11491 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11492 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11493 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11494 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11495 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11496 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11497 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11498 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11499 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11500 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11501 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11502 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11503 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11504 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11505 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11506 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11507 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11508
11509#undef IS_EITHER_ENABLED
11510#undef SET_ONLY_XBM_IF_EITHER_EN
11511#undef SET_CPE1_XBM_IF_EITHER_EN
11512#undef SET_CPEU_XBM_IF_EITHER_EN
11513#undef SET_CPE2_XBM_IF_EITHER_EN
11514
11515 /*
11516 * Sanitize the control stuff.
11517 */
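    /* Rough reading of the capability masks: allowed1 holds the controls this CPU can set to 1,
       while allowed0 holds the controls it requires to be 1. So we only request extras the CPU
       supports and never mark a mandatory control as unwanted. */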
11518 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11519 if (pDbgState->fCpe2Extra)
11520 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11521 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11522 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
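    /* If the desired RDTSC-exiting setting changes below, clear fUpdatedTscOffsettingAndPreemptTimer
       so the TSC offsetting and preemption timer are set up again before the next VM-entry. */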
11523#ifndef IN_NEM_DARWIN
11524 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11525 {
11526 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11527 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11528 }
11529#else
11530 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11531 {
11532 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11533 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11534 }
11535#endif
11536
11537 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11538 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11539 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11540 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11541}
11542
11543
11544/**
11545 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11546 * appropriate.
11547 *
11548 * The caller has checked the VM-exit against the
11549 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11550 * already, so we don't have to do that either.
11551 *
11552 * @returns Strict VBox status code (i.e. informational status codes too).
11553 * @param pVCpu The cross context virtual CPU structure.
11554 * @param pVmxTransient The VMX-transient structure.
11555 * @param uExitReason The VM-exit reason.
11556 *
11557 * @remarks The name of this function is displayed by dtrace, so keep it short
11558 * and to the point. No longer than 33 chars, please.
11559 */
11560static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11561{
11562 /*
11563 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11564 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11565 *
11566 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11567 * does. Additions, changes and removals must be made in both places, in the same order.
11568 *
11569 * Added/removed events must also be reflected in the next section
11570 * where we dispatch dtrace events.
11571 */
11572 bool fDtrace1 = false;
11573 bool fDtrace2 = false;
11574 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11575 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11576 uint32_t uEventArg = 0;
11577#define SET_EXIT(a_EventSubName) \
11578 do { \
11579 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11580 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11581 } while (0)
11582#define SET_BOTH(a_EventSubName) \
11583 do { \
11584 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11585 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11586 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11587 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11588 } while (0)
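    /* For instance, SET_BOTH(CPUID) selects DBGFEVENT_INSTR_CPUID / DBGFEVENT_EXIT_CPUID as the
       candidate events and checks whether the matching VBOXVMM_INSTR_CPUID / VBOXVMM_EXIT_CPUID
       dtrace probes are enabled. */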
11589 switch (uExitReason)
11590 {
11591 case VMX_EXIT_MTF:
11592 return vmxHCExitMtf(pVCpu, pVmxTransient);
11593
11594 case VMX_EXIT_XCPT_OR_NMI:
11595 {
11596 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11597 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11598 {
11599 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11600 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11601 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11602 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11603 {
11604 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11605 {
11606 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11607 uEventArg = pVmxTransient->uExitIntErrorCode;
11608 }
11609 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11610 switch (enmEvent1)
11611 {
11612 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11613 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11614 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11615 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11616 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11617 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11618 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11619 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11620 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11621 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11622 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11623 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11624 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11625 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11626 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11627 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11628 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11629 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11630 default: break;
11631 }
11632 }
11633 else
11634 AssertFailed();
11635 break;
11636
11637 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11638 uEventArg = idxVector;
11639 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11640 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11641 break;
11642 }
11643 break;
11644 }
11645
11646 case VMX_EXIT_TRIPLE_FAULT:
11647 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11648 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11649 break;
11650 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11651 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11652 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11653 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11654 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11655
11656 /* Instruction specific VM-exits: */
11657 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11658 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11659 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11660 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11661 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11662 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11663 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11664 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11665 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11666 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11667 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11668 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11669 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11670 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11671 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11672 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11673 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11674 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11675 case VMX_EXIT_MOV_CRX:
11676 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11677 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11678 SET_BOTH(CRX_READ);
11679 else
11680 SET_BOTH(CRX_WRITE);
11681 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11682 break;
11683 case VMX_EXIT_MOV_DRX:
11684 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11685 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11686 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11687 SET_BOTH(DRX_READ);
11688 else
11689 SET_BOTH(DRX_WRITE);
11690 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11691 break;
11692 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11693 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11694 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11695 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11696 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11697 case VMX_EXIT_GDTR_IDTR_ACCESS:
11698 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11699 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11700 {
11701 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11702 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11703 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11704 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11705 }
11706 break;
11707
11708 case VMX_EXIT_LDTR_TR_ACCESS:
11709 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11710 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11711 {
11712 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11713 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11714 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11715 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11716 }
11717 break;
11718
11719 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11720 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11721 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11722 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11723 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11724 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11725 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11726 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11727 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11728 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11729 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11730
11731 /* Events that aren't relevant at this point. */
11732 case VMX_EXIT_EXT_INT:
11733 case VMX_EXIT_INT_WINDOW:
11734 case VMX_EXIT_NMI_WINDOW:
11735 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11736 case VMX_EXIT_PREEMPT_TIMER:
11737 case VMX_EXIT_IO_INSTR:
11738 break;
11739
11740 /* Errors and unexpected events. */
11741 case VMX_EXIT_INIT_SIGNAL:
11742 case VMX_EXIT_SIPI:
11743 case VMX_EXIT_IO_SMI:
11744 case VMX_EXIT_SMI:
11745 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11746 case VMX_EXIT_ERR_MSR_LOAD:
11747 case VMX_EXIT_ERR_MACHINE_CHECK:
11748 case VMX_EXIT_PML_FULL:
11749 case VMX_EXIT_VIRTUALIZED_EOI:
11750 break;
11751
11752 default:
11753 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11754 break;
11755 }
11756#undef SET_BOTH
11757#undef SET_EXIT
11758
11759 /*
11760 * Dtrace tracepoints go first. We do them all in one place so we don't
11761 * have to repeat the guest-state import and related boilerplate a few dozen times.
11762 * The downside is that we've got to repeat the switch, though this time
11763 * we use enmEvent since the probes are a subset of what DBGF does.
11764 */
11765 if (fDtrace1 || fDtrace2)
11766 {
11767 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11768 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11769 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
11770 switch (enmEvent1)
11771 {
11772 /** @todo consider which extra parameters would be helpful for each probe. */
11773 case DBGFEVENT_END: break;
11774 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11775 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11776 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11777 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11778 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11779 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11780 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11781 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11782 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11783 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11784 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11785 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11786 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11787 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11788 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11789 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11790 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11791 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11792 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11793 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11794 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11795 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11796 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11797 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11798 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11799 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11800 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11801 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11802 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11803 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11804 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11805 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11806 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11807 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11808 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11809 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11810 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11811 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11812 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11813 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11814 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11815 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11816 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11817 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11818 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11819 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11820 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11821 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11822 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11823 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11824 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11825 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11826 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11827 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11828 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11829 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11830 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11831 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11832 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11833 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11834 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11835 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11836 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11837 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11838 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11839 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11840 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11841 }
11842 switch (enmEvent2)
11843 {
11844 /** @todo consider which extra parameters would be helpful for each probe. */
11845 case DBGFEVENT_END: break;
11846 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11847 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11848 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11849 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11850 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11851 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11852 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11853 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11854 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11855 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11856 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11857 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11858 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11859 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11860 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11861 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11862 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11863 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11864 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11865 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11866 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11867 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11868 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11869 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11870 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11871 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11872 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11873 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11874 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11875 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11876 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11877 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11878 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11879 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11880 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11881 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11882 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11883 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11884 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11885 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11886 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11887 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11888 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11889 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11890 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11891 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11892 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11893 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11894 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11895 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11896 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11897 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11898 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11899 }
11900 }
11901
11902 /*
11903 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11904 * the DBGF call will do a full check).
11905 *
11906 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11907 * Note! If we have two events, we prioritize the first, i.e. the instruction
11908 * one, in order to avoid event nesting.
11909 */
11910 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11911 if ( enmEvent1 != DBGFEVENT_END
11912 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11913 {
11914 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11915 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11916 if (rcStrict != VINF_SUCCESS)
11917 return rcStrict;
11918 }
11919 else if ( enmEvent2 != DBGFEVENT_END
11920 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11921 {
11922 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11923 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11924 if (rcStrict != VINF_SUCCESS)
11925 return rcStrict;
11926 }
11927
11928 return VINF_SUCCESS;
11929}
11930
11931
11932/**
11933 * Single-stepping VM-exit filtering.
11934 *
11935 * This preprocesses the VM-exits and decides whether we've gotten far
11936 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11937 * handling is performed.
11938 *
11939 * @returns Strict VBox status code (i.e. informational status codes too).
11940 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11941 * @param pVmxTransient The VMX-transient structure.
11942 * @param pDbgState The debug state.
11943 */
11944DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11945{
11946 /*
11947 * Expensive (saves context) generic dtrace VM-exit probe.
11948 */
11949 uint32_t const uExitReason = pVmxTransient->uExitReason;
11950 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11951 { /* more likely */ }
11952 else
11953 {
11954 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11955 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11956 AssertRC(rc);
11957 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11958 }
11959
11960#ifndef IN_NEM_DARWIN
11961 /*
11962 * Check for host NMI, just to get that out of the way.
11963 */
11964 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11965 { /* normally likely */ }
11966 else
11967 {
11968 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11969 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11970 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11971 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11972 }
11973#endif
11974
11975 /*
11976 * Check for single stepping event if we're stepping.
11977 */
11978 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11979 {
11980 switch (uExitReason)
11981 {
11982 case VMX_EXIT_MTF:
11983 return vmxHCExitMtf(pVCpu, pVmxTransient);
11984
11985 /* Various events: */
11986 case VMX_EXIT_XCPT_OR_NMI:
11987 case VMX_EXIT_EXT_INT:
11988 case VMX_EXIT_TRIPLE_FAULT:
11989 case VMX_EXIT_INT_WINDOW:
11990 case VMX_EXIT_NMI_WINDOW:
11991 case VMX_EXIT_TASK_SWITCH:
11992 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11993 case VMX_EXIT_APIC_ACCESS:
11994 case VMX_EXIT_EPT_VIOLATION:
11995 case VMX_EXIT_EPT_MISCONFIG:
11996 case VMX_EXIT_PREEMPT_TIMER:
11997
11998 /* Instruction specific VM-exits: */
11999 case VMX_EXIT_CPUID:
12000 case VMX_EXIT_GETSEC:
12001 case VMX_EXIT_HLT:
12002 case VMX_EXIT_INVD:
12003 case VMX_EXIT_INVLPG:
12004 case VMX_EXIT_RDPMC:
12005 case VMX_EXIT_RDTSC:
12006 case VMX_EXIT_RSM:
12007 case VMX_EXIT_VMCALL:
12008 case VMX_EXIT_VMCLEAR:
12009 case VMX_EXIT_VMLAUNCH:
12010 case VMX_EXIT_VMPTRLD:
12011 case VMX_EXIT_VMPTRST:
12012 case VMX_EXIT_VMREAD:
12013 case VMX_EXIT_VMRESUME:
12014 case VMX_EXIT_VMWRITE:
12015 case VMX_EXIT_VMXOFF:
12016 case VMX_EXIT_VMXON:
12017 case VMX_EXIT_MOV_CRX:
12018 case VMX_EXIT_MOV_DRX:
12019 case VMX_EXIT_IO_INSTR:
12020 case VMX_EXIT_RDMSR:
12021 case VMX_EXIT_WRMSR:
12022 case VMX_EXIT_MWAIT:
12023 case VMX_EXIT_MONITOR:
12024 case VMX_EXIT_PAUSE:
12025 case VMX_EXIT_GDTR_IDTR_ACCESS:
12026 case VMX_EXIT_LDTR_TR_ACCESS:
12027 case VMX_EXIT_INVEPT:
12028 case VMX_EXIT_RDTSCP:
12029 case VMX_EXIT_INVVPID:
12030 case VMX_EXIT_WBINVD:
12031 case VMX_EXIT_XSETBV:
12032 case VMX_EXIT_RDRAND:
12033 case VMX_EXIT_INVPCID:
12034 case VMX_EXIT_VMFUNC:
12035 case VMX_EXIT_RDSEED:
12036 case VMX_EXIT_XSAVES:
12037 case VMX_EXIT_XRSTORS:
12038 {
12039 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12040 AssertRCReturn(rc, rc);
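                /* Report the step as complete once RIP or CS has moved away from where the
                   single-step started; otherwise fall through to normal exit handling. */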
12041 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12042 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12043 return VINF_EM_DBG_STEPPED;
12044 break;
12045 }
12046
12047 /* Errors and unexpected events: */
12048 case VMX_EXIT_INIT_SIGNAL:
12049 case VMX_EXIT_SIPI:
12050 case VMX_EXIT_IO_SMI:
12051 case VMX_EXIT_SMI:
12052 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12053 case VMX_EXIT_ERR_MSR_LOAD:
12054 case VMX_EXIT_ERR_MACHINE_CHECK:
12055 case VMX_EXIT_PML_FULL:
12056 case VMX_EXIT_VIRTUALIZED_EOI:
12057 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12058 break;
12059
12060 default:
12061 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12062 break;
12063 }
12064 }
12065
12066 /*
12067 * Check for debugger event breakpoints and dtrace probes.
12068 */
12069 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12070 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12071 {
12072 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12073 if (rcStrict != VINF_SUCCESS)
12074 return rcStrict;
12075 }
12076
12077 /*
12078 * Normal processing.
12079 */
12080#ifdef HMVMX_USE_FUNCTION_TABLE
12081 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12082#else
12083 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12084#endif
12085}
12086
12087/** @} */